xref: /qemu/system/physmem.c (revision d24981d37e793b0a8fcde1879db19eb11fe0f975)
154936004Sbellard /*
2fd6ce8f6Sbellard  *  virtual page mapping and translated block handling
354936004Sbellard  *
454936004Sbellard  *  Copyright (c) 2003 Fabrice Bellard
554936004Sbellard  *
654936004Sbellard  * This library is free software; you can redistribute it and/or
754936004Sbellard  * modify it under the terms of the GNU Lesser General Public
854936004Sbellard  * License as published by the Free Software Foundation; either
954936004Sbellard  * version 2 of the License, or (at your option) any later version.
1054936004Sbellard  *
1154936004Sbellard  * This library is distributed in the hope that it will be useful,
1254936004Sbellard  * but WITHOUT ANY WARRANTY; without even the implied warranty of
1354936004Sbellard  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
1454936004Sbellard  * Lesser General Public License for more details.
1554936004Sbellard  *
1654936004Sbellard  * You should have received a copy of the GNU Lesser General Public
178167ee88SBlue Swirl  * License along with this library; if not, see <http://www.gnu.org/licenses/>.
1854936004Sbellard  */
1967b915a5Sbellard #include "config.h"
20d5a8f07cSbellard #ifdef _WIN32
21d5a8f07cSbellard #include <windows.h>
22d5a8f07cSbellard #else
23a98d49b1Sbellard #include <sys/types.h>
24d5a8f07cSbellard #include <sys/mman.h>
25d5a8f07cSbellard #endif
2654936004Sbellard 
27055403b2SStefan Weil #include "qemu-common.h"
286180a181Sbellard #include "cpu.h"
29b67d9a52Sbellard #include "tcg.h"
30b3c7724cSpbrook #include "hw/hw.h"
31cc9e98cbSAlex Williamson #include "hw/qdev.h"
3274576198Saliguori #include "osdep.h"
337ba1e619Saliguori #include "kvm.h"
34432d268cSJun Nakajima #include "hw/xen.h"
3529e922b6SBlue Swirl #include "qemu-timer.h"
3662152b8aSAvi Kivity #include "memory.h"
3762152b8aSAvi Kivity #include "exec-memory.h"
3853a5960aSpbrook #if defined(CONFIG_USER_ONLY)
3953a5960aSpbrook #include <qemu.h>
40f01576f1SJuergen Lock #if defined(__FreeBSD__) || defined(__FreeBSD_kernel__)
41f01576f1SJuergen Lock #include <sys/param.h>
42f01576f1SJuergen Lock #if __FreeBSD_version >= 700104
43f01576f1SJuergen Lock #define HAVE_KINFO_GETVMMAP
44f01576f1SJuergen Lock #define sigqueue sigqueue_freebsd  /* avoid redefinition */
45f01576f1SJuergen Lock #include <sys/time.h>
46f01576f1SJuergen Lock #include <sys/proc.h>
47f01576f1SJuergen Lock #include <machine/profile.h>
48f01576f1SJuergen Lock #define _KERNEL
49f01576f1SJuergen Lock #include <sys/user.h>
50f01576f1SJuergen Lock #undef _KERNEL
51f01576f1SJuergen Lock #undef sigqueue
52f01576f1SJuergen Lock #include <libutil.h>
53f01576f1SJuergen Lock #endif
54f01576f1SJuergen Lock #endif
55432d268cSJun Nakajima #else /* !CONFIG_USER_ONLY */
56432d268cSJun Nakajima #include "xen-mapcache.h"
576506e4f9SStefano Stabellini #include "trace.h"
5853a5960aSpbrook #endif
5954936004Sbellard 
600cac1b66SBlue Swirl #include "cputlb.h"
610cac1b66SBlue Swirl 
6267d95c15SAvi Kivity #define WANT_EXEC_OBSOLETE
6367d95c15SAvi Kivity #include "exec-obsolete.h"
6467d95c15SAvi Kivity 
65fd6ce8f6Sbellard //#define DEBUG_TB_INVALIDATE
6666e85a21Sbellard //#define DEBUG_FLUSH
6767d3b957Spbrook //#define DEBUG_UNASSIGNED
68fd6ce8f6Sbellard 
69fd6ce8f6Sbellard /* make various TB consistency checks */
70fd6ce8f6Sbellard //#define DEBUG_TB_CHECK
71fd6ce8f6Sbellard 
721196be37Sths //#define DEBUG_IOPORT
73db7b5426Sblueswir1 //#define DEBUG_SUBPAGE
741196be37Sths 
7599773bd4Spbrook #if !defined(CONFIG_USER_ONLY)
7699773bd4Spbrook /* TB consistency checks only implemented for usermode emulation.  */
7799773bd4Spbrook #undef DEBUG_TB_CHECK
7899773bd4Spbrook #endif
7999773bd4Spbrook 
809fa3e853Sbellard #define SMC_BITMAP_USE_THRESHOLD 10
819fa3e853Sbellard 
82bdaf78e0Sblueswir1 static TranslationBlock *tbs;
8324ab68acSStefan Weil static int code_gen_max_blocks;
849fa3e853Sbellard TranslationBlock *tb_phys_hash[CODE_GEN_PHYS_HASH_SIZE];
85bdaf78e0Sblueswir1 static int nb_tbs;
86eb51d102Sbellard /* any access to the TBs or the page table must hold this lock */
87c227f099SAnthony Liguori spinlock_t tb_lock = SPIN_LOCK_UNLOCKED;
88fd6ce8f6Sbellard 
89141ac468Sblueswir1 #if defined(__arm__) || defined(__sparc_v9__)
90141ac468Sblueswir1 /* The prologue must be reachable with a direct jump. ARM and Sparc64
91141ac468Sblueswir1  have limited branch ranges (possibly also PPC), so place it in a
92d03d860bSblueswir1  section close to the code segment. */
93d03d860bSblueswir1 #define code_gen_section                                \
94d03d860bSblueswir1     __attribute__((__section__(".gen_code")))           \
95d03d860bSblueswir1     __attribute__((aligned (32)))
966840981dSStefan Weil #elif defined(_WIN32) && !defined(_WIN64)
97f8e2af11SStefan Weil #define code_gen_section                                \
98f8e2af11SStefan Weil     __attribute__((aligned (16)))
99d03d860bSblueswir1 #else
100d03d860bSblueswir1 #define code_gen_section                                \
101d03d860bSblueswir1     __attribute__((aligned (32)))
102d03d860bSblueswir1 #endif
103d03d860bSblueswir1 
104d03d860bSblueswir1 uint8_t code_gen_prologue[1024] code_gen_section;
105bdaf78e0Sblueswir1 static uint8_t *code_gen_buffer;
106bdaf78e0Sblueswir1 static unsigned long code_gen_buffer_size;
10726a5f13bSbellard /* threshold to flush the translated code buffer */
108bdaf78e0Sblueswir1 static unsigned long code_gen_buffer_max_size;
10924ab68acSStefan Weil static uint8_t *code_gen_ptr;
110fd6ce8f6Sbellard 
111e2eef170Spbrook #if !defined(CONFIG_USER_ONLY)
1129fa3e853Sbellard int phys_ram_fd;
11374576198Saliguori static int in_migration;
11494a6b54fSpbrook 
11585d59fefSPaolo Bonzini RAMList ram_list = { .blocks = QLIST_HEAD_INITIALIZER(ram_list.blocks) };
11662152b8aSAvi Kivity 
11762152b8aSAvi Kivity static MemoryRegion *system_memory;
118309cb471SAvi Kivity static MemoryRegion *system_io;
11962152b8aSAvi Kivity 
1200e0df1e2SAvi Kivity MemoryRegion io_mem_ram, io_mem_rom, io_mem_unassigned, io_mem_notdirty;
121de712f94SAvi Kivity static MemoryRegion io_mem_subpage_ram;
1220e0df1e2SAvi Kivity 
123e2eef170Spbrook #endif
1249fa3e853Sbellard 
1259349b4f9SAndreas Färber CPUArchState *first_cpu;
1266a00d601Sbellard /* current CPU in the current thread. It is only valid inside
1276a00d601Sbellard    cpu_exec() */
1289349b4f9SAndreas Färber DEFINE_TLS(CPUArchState *,cpu_single_env);
1292e70f6efSpbrook /* 0 = Do not count executed instructions.
130bf20dc07Sths    1 = Precise instruction counting.
1312e70f6efSpbrook    2 = Adaptive rate instruction counting.  */
1322e70f6efSpbrook int use_icount = 0;
1336a00d601Sbellard 
13454936004Sbellard typedef struct PageDesc {
13592e873b9Sbellard     /* list of TBs intersecting this ram page */
136fd6ce8f6Sbellard     TranslationBlock *first_tb;
1379fa3e853Sbellard     /* to optimize self-modifying code handling, we count the number
1389fa3e853Sbellard        of write accesses to a given page; past a threshold we use a bitmap */
1399fa3e853Sbellard     unsigned int code_write_count;
1409fa3e853Sbellard     uint8_t *code_bitmap;
1419fa3e853Sbellard #if defined(CONFIG_USER_ONLY)
1429fa3e853Sbellard     unsigned long flags;
1439fa3e853Sbellard #endif
14454936004Sbellard } PageDesc;
14554936004Sbellard 
14641c1b1c9SPaul Brook /* In system mode we want L1_MAP to be based on ram offsets,
1475cd2c5b6SRichard Henderson    while in user mode we want it to be based on virtual addresses.  */
1485cd2c5b6SRichard Henderson #if !defined(CONFIG_USER_ONLY)
14941c1b1c9SPaul Brook #if HOST_LONG_BITS < TARGET_PHYS_ADDR_SPACE_BITS
15041c1b1c9SPaul Brook # define L1_MAP_ADDR_SPACE_BITS  HOST_LONG_BITS
15141c1b1c9SPaul Brook #else
1525cd2c5b6SRichard Henderson # define L1_MAP_ADDR_SPACE_BITS  TARGET_PHYS_ADDR_SPACE_BITS
15341c1b1c9SPaul Brook #endif
154bedb69eaSj_mayer #else
1555cd2c5b6SRichard Henderson # define L1_MAP_ADDR_SPACE_BITS  TARGET_VIRT_ADDR_SPACE_BITS
156bedb69eaSj_mayer #endif
15754936004Sbellard 
1585cd2c5b6SRichard Henderson /* Size of the L2 (and L3, etc) page tables.  */
1595cd2c5b6SRichard Henderson #define L2_BITS 10
16054936004Sbellard #define L2_SIZE (1 << L2_BITS)
16154936004Sbellard 
1623eef53dfSAvi Kivity #define P_L2_LEVELS \
1633eef53dfSAvi Kivity     (((TARGET_PHYS_ADDR_SPACE_BITS - TARGET_PAGE_BITS - 1) / L2_BITS) + 1)
1643eef53dfSAvi Kivity 
1655cd2c5b6SRichard Henderson /* The bits remaining after N lower levels of page tables.  */
1665cd2c5b6SRichard Henderson #define V_L1_BITS_REM \
1675cd2c5b6SRichard Henderson     ((L1_MAP_ADDR_SPACE_BITS - TARGET_PAGE_BITS) % L2_BITS)
1685cd2c5b6SRichard Henderson 
1695cd2c5b6SRichard Henderson #if V_L1_BITS_REM < 4
1705cd2c5b6SRichard Henderson #define V_L1_BITS  (V_L1_BITS_REM + L2_BITS)
1715cd2c5b6SRichard Henderson #else
1725cd2c5b6SRichard Henderson #define V_L1_BITS  V_L1_BITS_REM
1735cd2c5b6SRichard Henderson #endif
1745cd2c5b6SRichard Henderson 
1755cd2c5b6SRichard Henderson #define V_L1_SIZE  ((target_ulong)1 << V_L1_BITS)
1765cd2c5b6SRichard Henderson 
1775cd2c5b6SRichard Henderson #define V_L1_SHIFT (L1_MAP_ADDR_SPACE_BITS - TARGET_PAGE_BITS - V_L1_BITS)
1785cd2c5b6SRichard Henderson 
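/* Worked example of the macros above, with illustrative values only:
   assume a 64-bit user-mode host, so L1_MAP_ADDR_SPACE_BITS = 64, with
   TARGET_PAGE_BITS = 12 and L2_BITS = 10.  Then:

       bits to map   = 64 - 12      = 52
       V_L1_BITS_REM = 52 % 10      = 2    (< 4, so widen the L1 table)
       V_L1_BITS     = 2 + 10       = 12
       V_L1_SIZE     = 1 << 12      = 4096 entries in l1_map
       V_L1_SHIFT    = 64 - 12 - 12 = 40

   An address therefore contributes its bits above bit 40 as the L1
   index, and each lower level consumes L2_BITS more bits.  Widening
   V_L1_BITS when the remainder is small avoids a nearly useless
   top-level table with only a few entries.  */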
179c6d50674SStefan Weil uintptr_t qemu_real_host_page_size;
180c6d50674SStefan Weil uintptr_t qemu_host_page_size;
181c6d50674SStefan Weil uintptr_t qemu_host_page_mask;
18254936004Sbellard 
1835cd2c5b6SRichard Henderson /* This is a multi-level map on the virtual address space.
1845cd2c5b6SRichard Henderson    The bottom level has pointers to PageDesc.  */
1855cd2c5b6SRichard Henderson static void *l1_map[V_L1_SIZE];
18654936004Sbellard 
187e2eef170Spbrook #if !defined(CONFIG_USER_ONLY)
1884346ae3eSAvi Kivity typedef struct PhysPageEntry PhysPageEntry;
1894346ae3eSAvi Kivity 
1905312bd8bSAvi Kivity static MemoryRegionSection *phys_sections;
1915312bd8bSAvi Kivity static unsigned phys_sections_nb, phys_sections_nb_alloc;
1925312bd8bSAvi Kivity static uint16_t phys_section_unassigned;
193aa102231SAvi Kivity static uint16_t phys_section_notdirty;
194aa102231SAvi Kivity static uint16_t phys_section_rom;
195aa102231SAvi Kivity static uint16_t phys_section_watch;
1965312bd8bSAvi Kivity 
1974346ae3eSAvi Kivity struct PhysPageEntry {
19807f07b31SAvi Kivity     uint16_t is_leaf : 1;
19907f07b31SAvi Kivity      /* index into phys_sections (is_leaf) or phys_map_nodes (!is_leaf) */
20007f07b31SAvi Kivity     uint16_t ptr : 15;
2014346ae3eSAvi Kivity };
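/* The entry packs into 16 bits: an is_leaf flag plus a 15-bit index,
   so the map can name at most 2^15 - 1 = 32767 nodes or sections;
   PHYS_MAP_NODE_NIL below is the all-ones 15-bit value, reserved as
   the "no node" sentinel.  A walk decodes an entry roughly as follows
   (a sketch; see phys_page_find() for the real loop):

       if (lp.is_leaf) {
           section = &phys_sections[lp.ptr];   // terminal: a section
       } else if (lp.ptr != PHYS_MAP_NODE_NIL) {
           p = phys_map_nodes[lp.ptr];         // descend one level
       }
 */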
2024346ae3eSAvi Kivity 
203d6f2ea22SAvi Kivity /* Simple allocator for PhysPageEntry nodes */
204d6f2ea22SAvi Kivity static PhysPageEntry (*phys_map_nodes)[L2_SIZE];
205d6f2ea22SAvi Kivity static unsigned phys_map_nodes_nb, phys_map_nodes_nb_alloc;
206d6f2ea22SAvi Kivity 
20707f07b31SAvi Kivity #define PHYS_MAP_NODE_NIL (((uint16_t)~0) >> 1)
208d6f2ea22SAvi Kivity 
2095cd2c5b6SRichard Henderson /* This is a multi-level map on the physical address space.
21006ef3525SAvi Kivity    The bottom level has pointers to MemoryRegionSections.  */
21107f07b31SAvi Kivity static PhysPageEntry phys_map = { .ptr = PHYS_MAP_NODE_NIL, .is_leaf = 0 };
2126d9a1304SPaul Brook 
213e2eef170Spbrook static void io_mem_init(void);
21462152b8aSAvi Kivity static void memory_map_init(void);
215e2eef170Spbrook 
2161ec9b909SAvi Kivity static MemoryRegion io_mem_watch;
2176658ffb8Spbrook #endif
21833417e70Sbellard 
219e3db7226Sbellard /* statistics */
220e3db7226Sbellard static int tb_flush_count;
221e3db7226Sbellard static int tb_phys_invalidate_count;
222e3db7226Sbellard 
2237cb69caeSbellard #ifdef _WIN32
2247cb69caeSbellard static void map_exec(void *addr, long size)
2257cb69caeSbellard {
2267cb69caeSbellard     DWORD old_protect;
2277cb69caeSbellard     VirtualProtect(addr, size,
2287cb69caeSbellard                    PAGE_EXECUTE_READWRITE, &old_protect);
2297cb69caeSbellard 
2307cb69caeSbellard }
2317cb69caeSbellard #else
2327cb69caeSbellard static void map_exec(void *addr, long size)
2337cb69caeSbellard {
2344369415fSbellard     unsigned long start, end, page_size;
2357cb69caeSbellard 
2364369415fSbellard     page_size = getpagesize();
2377cb69caeSbellard     start = (unsigned long)addr;
2384369415fSbellard     start &= ~(page_size - 1);
2397cb69caeSbellard 
2407cb69caeSbellard     end = (unsigned long)addr + size;
2414369415fSbellard     end += page_size - 1;
2424369415fSbellard     end &= ~(page_size - 1);
2437cb69caeSbellard 
2447cb69caeSbellard     mprotect((void *)start, end - start,
2457cb69caeSbellard              PROT_READ | PROT_WRITE | PROT_EXEC);
2467cb69caeSbellard }
2477cb69caeSbellard #endif
2487cb69caeSbellard 
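/* Example of the alignment arithmetic in the POSIX map_exec() above,
   assuming a 4 KB page size: for addr = 0x12345 and size = 0x100,

       start = 0x12345 & ~0xfff           = 0x12000
       end   = (0x12445 + 0xfff) & ~0xfff = 0x13000

   so mprotect() covers every page overlapping the buffer, as it must,
   since protections can only change with page granularity.  */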
249b346ff46Sbellard static void page_init(void)
25054936004Sbellard {
25183fb7adfSbellard     /* NOTE: we can always suppose that qemu_host_page_size >=
25254936004Sbellard        TARGET_PAGE_SIZE */
253c2b48b69Saliguori #ifdef _WIN32
254c2b48b69Saliguori     {
255c2b48b69Saliguori         SYSTEM_INFO system_info;
256c2b48b69Saliguori 
257c2b48b69Saliguori         GetSystemInfo(&system_info);
258c2b48b69Saliguori         qemu_real_host_page_size = system_info.dwPageSize;
259c2b48b69Saliguori     }
260c2b48b69Saliguori #else
261c2b48b69Saliguori     qemu_real_host_page_size = getpagesize();
262c2b48b69Saliguori #endif
26383fb7adfSbellard     if (qemu_host_page_size == 0)
26483fb7adfSbellard         qemu_host_page_size = qemu_real_host_page_size;
26583fb7adfSbellard     if (qemu_host_page_size < TARGET_PAGE_SIZE)
26683fb7adfSbellard         qemu_host_page_size = TARGET_PAGE_SIZE;
26783fb7adfSbellard     qemu_host_page_mask = ~(qemu_host_page_size - 1);
26850a9569bSbalrog 
2692e9a5713SPaul Brook #if defined(CONFIG_BSD) && defined(CONFIG_USER_ONLY)
27050a9569bSbalrog     {
271f01576f1SJuergen Lock #ifdef HAVE_KINFO_GETVMMAP
272f01576f1SJuergen Lock         struct kinfo_vmentry *freep;
273f01576f1SJuergen Lock         int i, cnt;
274f01576f1SJuergen Lock 
275f01576f1SJuergen Lock         freep = kinfo_getvmmap(getpid(), &cnt);
276f01576f1SJuergen Lock         if (freep) {
277f01576f1SJuergen Lock             mmap_lock();
278f01576f1SJuergen Lock             for (i = 0; i < cnt; i++) {
279f01576f1SJuergen Lock                 unsigned long startaddr, endaddr;
280f01576f1SJuergen Lock 
281f01576f1SJuergen Lock                 startaddr = freep[i].kve_start;
282f01576f1SJuergen Lock                 endaddr = freep[i].kve_end;
283f01576f1SJuergen Lock                 if (h2g_valid(startaddr)) {
284f01576f1SJuergen Lock                     startaddr = h2g(startaddr) & TARGET_PAGE_MASK;
285f01576f1SJuergen Lock 
286f01576f1SJuergen Lock                     if (h2g_valid(endaddr)) {
287f01576f1SJuergen Lock                         endaddr = h2g(endaddr);
288fd436907SAurelien Jarno                         page_set_flags(startaddr, endaddr, PAGE_RESERVED);
289f01576f1SJuergen Lock                     } else {
290f01576f1SJuergen Lock #if TARGET_ABI_BITS <= L1_MAP_ADDR_SPACE_BITS
291f01576f1SJuergen Lock                         endaddr = ~0ul;
292fd436907SAurelien Jarno                         page_set_flags(startaddr, endaddr, PAGE_RESERVED);
293f01576f1SJuergen Lock #endif
294f01576f1SJuergen Lock                     }
295f01576f1SJuergen Lock                 }
296f01576f1SJuergen Lock             }
297f01576f1SJuergen Lock             free(freep);
298f01576f1SJuergen Lock             mmap_unlock();
299f01576f1SJuergen Lock         }
300f01576f1SJuergen Lock #else
30150a9569bSbalrog         FILE *f;
30250a9569bSbalrog 
3030776590dSpbrook         last_brk = (unsigned long)sbrk(0);
3045cd2c5b6SRichard Henderson 
305fd436907SAurelien Jarno         f = fopen("/compat/linux/proc/self/maps", "r");
30650a9569bSbalrog         if (f) {
3075cd2c5b6SRichard Henderson             mmap_lock();
3085cd2c5b6SRichard Henderson 
30950a9569bSbalrog             do {
3105cd2c5b6SRichard Henderson                 unsigned long startaddr, endaddr;
3115cd2c5b6SRichard Henderson                 int n;
3125cd2c5b6SRichard Henderson 
3135cd2c5b6SRichard Henderson                 n = fscanf (f, "%lx-%lx %*[^\n]\n", &startaddr, &endaddr);
3145cd2c5b6SRichard Henderson 
3155cd2c5b6SRichard Henderson                 if (n == 2 && h2g_valid(startaddr)) {
3165cd2c5b6SRichard Henderson                     startaddr = h2g(startaddr) & TARGET_PAGE_MASK;
3175cd2c5b6SRichard Henderson 
3185cd2c5b6SRichard Henderson                     if (h2g_valid(endaddr)) {
3195cd2c5b6SRichard Henderson                         endaddr = h2g(endaddr);
3205cd2c5b6SRichard Henderson                     } else {
3215cd2c5b6SRichard Henderson                         endaddr = ~0ul;
3225cd2c5b6SRichard Henderson                     }
3235cd2c5b6SRichard Henderson                     page_set_flags(startaddr, endaddr, PAGE_RESERVED);
32450a9569bSbalrog                 }
32550a9569bSbalrog             } while (!feof(f));
3265cd2c5b6SRichard Henderson 
32750a9569bSbalrog             fclose(f);
328c8a706feSpbrook             mmap_unlock();
32950a9569bSbalrog         }
330f01576f1SJuergen Lock #endif
3315cd2c5b6SRichard Henderson     }
33250a9569bSbalrog #endif
33354936004Sbellard }
33454936004Sbellard 
33541c1b1c9SPaul Brook static PageDesc *page_find_alloc(tb_page_addr_t index, int alloc)
33654936004Sbellard {
33741c1b1c9SPaul Brook     PageDesc *pd;
33841c1b1c9SPaul Brook     void **lp;
33941c1b1c9SPaul Brook     int i;
34041c1b1c9SPaul Brook 
34117e2377aSpbrook #if defined(CONFIG_USER_ONLY)
3427267c094SAnthony Liguori     /* We can't use g_malloc because it may recurse into a locked mutex. */
3435cd2c5b6SRichard Henderson # define ALLOC(P, SIZE)                                 \
3445cd2c5b6SRichard Henderson     do {                                                \
3455cd2c5b6SRichard Henderson         P = mmap(NULL, SIZE, PROT_READ | PROT_WRITE,    \
3465cd2c5b6SRichard Henderson                  MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);   \
3475cd2c5b6SRichard Henderson     } while (0)
3485cd2c5b6SRichard Henderson #else
3495cd2c5b6SRichard Henderson # define ALLOC(P, SIZE) \
3507267c094SAnthony Liguori     do { P = g_malloc0(SIZE); } while (0)
3515cd2c5b6SRichard Henderson #endif
3525cd2c5b6SRichard Henderson 
3535cd2c5b6SRichard Henderson     /* Level 1.  Always allocated.  */
3545cd2c5b6SRichard Henderson     lp = l1_map + ((index >> V_L1_SHIFT) & (V_L1_SIZE - 1));
3555cd2c5b6SRichard Henderson 
3565cd2c5b6SRichard Henderson     /* Level 2..N-1.  */
3575cd2c5b6SRichard Henderson     for (i = V_L1_SHIFT / L2_BITS - 1; i > 0; i--) {
3585cd2c5b6SRichard Henderson         void **p = *lp;
3595cd2c5b6SRichard Henderson 
3605cd2c5b6SRichard Henderson         if (p == NULL) {
3615cd2c5b6SRichard Henderson             if (!alloc) {
3625cd2c5b6SRichard Henderson                 return NULL;
3635cd2c5b6SRichard Henderson             }
3645cd2c5b6SRichard Henderson             ALLOC(p, sizeof(void *) * L2_SIZE);
36554936004Sbellard             *lp = p;
3665cd2c5b6SRichard Henderson         }
3675cd2c5b6SRichard Henderson 
3685cd2c5b6SRichard Henderson         lp = p + ((index >> (i * L2_BITS)) & (L2_SIZE - 1));
3695cd2c5b6SRichard Henderson     }
3705cd2c5b6SRichard Henderson 
3715cd2c5b6SRichard Henderson     pd = *lp;
3725cd2c5b6SRichard Henderson     if (pd == NULL) {
3735cd2c5b6SRichard Henderson         if (!alloc) {
3745cd2c5b6SRichard Henderson             return NULL;
3755cd2c5b6SRichard Henderson         }
3765cd2c5b6SRichard Henderson         ALLOC(pd, sizeof(PageDesc) * L2_SIZE);
3775cd2c5b6SRichard Henderson         *lp = pd;
3785cd2c5b6SRichard Henderson     }
3795cd2c5b6SRichard Henderson 
3805cd2c5b6SRichard Henderson #undef ALLOC
3815cd2c5b6SRichard Henderson 
3825cd2c5b6SRichard Henderson     return pd + (index & (L2_SIZE - 1));
38354936004Sbellard }
38454936004Sbellard 
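/* Illustrative index decomposition in page_find_alloc(), assuming the
   example values V_L1_SHIFT = 40, V_L1_BITS = 12 and L2_BITS = 10
   (the real values depend on the configuration; see the macros above).
   For a page index I:

       level 1 slot  : (I >> 40) & 0xfff
       level i slot  : (I >> (i * 10)) & 0x3ff   for i = N-1 .. 1
       PageDesc slot : I & 0x3ff                 in the bottom table

   Intermediate tables are allocated lazily on the way down when
   alloc != 0, and the walk bails out early with NULL otherwise.  */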
38541c1b1c9SPaul Brook static inline PageDesc *page_find(tb_page_addr_t index)
38654936004Sbellard {
3875cd2c5b6SRichard Henderson     return page_find_alloc(index, 0);
38854936004Sbellard }
38954936004Sbellard 
3906d9a1304SPaul Brook #if !defined(CONFIG_USER_ONLY)
391d6f2ea22SAvi Kivity 
392f7bf5461SAvi Kivity static void phys_map_node_reserve(unsigned nodes)
393f7bf5461SAvi Kivity {
394f7bf5461SAvi Kivity     if (phys_map_nodes_nb + nodes > phys_map_nodes_nb_alloc) {
395f7bf5461SAvi Kivity         typedef PhysPageEntry Node[L2_SIZE];
396f7bf5461SAvi Kivity         phys_map_nodes_nb_alloc = MAX(phys_map_nodes_nb_alloc * 2, 16);
397f7bf5461SAvi Kivity         phys_map_nodes_nb_alloc = MAX(phys_map_nodes_nb_alloc,
398f7bf5461SAvi Kivity                                       phys_map_nodes_nb + nodes);
399f7bf5461SAvi Kivity         phys_map_nodes = g_renew(Node, phys_map_nodes,
400f7bf5461SAvi Kivity                                  phys_map_nodes_nb_alloc);
401f7bf5461SAvi Kivity     }
402f7bf5461SAvi Kivity }
403f7bf5461SAvi Kivity 
404f7bf5461SAvi Kivity static uint16_t phys_map_node_alloc(void)
405d6f2ea22SAvi Kivity {
406d6f2ea22SAvi Kivity     unsigned i;
407d6f2ea22SAvi Kivity     uint16_t ret;
408d6f2ea22SAvi Kivity 
409f7bf5461SAvi Kivity     ret = phys_map_nodes_nb++;
410d6f2ea22SAvi Kivity     assert(ret != PHYS_MAP_NODE_NIL);
411f7bf5461SAvi Kivity     assert(ret != phys_map_nodes_nb_alloc);
412d6f2ea22SAvi Kivity     for (i = 0; i < L2_SIZE; ++i) {
41307f07b31SAvi Kivity         phys_map_nodes[ret][i].is_leaf = 0;
414c19e8800SAvi Kivity         phys_map_nodes[ret][i].ptr = PHYS_MAP_NODE_NIL;
415d6f2ea22SAvi Kivity     }
416f7bf5461SAvi Kivity     return ret;
417d6f2ea22SAvi Kivity }
418d6f2ea22SAvi Kivity 
419d6f2ea22SAvi Kivity static void phys_map_nodes_reset(void)
420d6f2ea22SAvi Kivity {
421d6f2ea22SAvi Kivity     phys_map_nodes_nb = 0;
422d6f2ea22SAvi Kivity }
423d6f2ea22SAvi Kivity 
424f7bf5461SAvi Kivity 
4252999097bSAvi Kivity static void phys_page_set_level(PhysPageEntry *lp, target_phys_addr_t *index,
4262999097bSAvi Kivity                                 target_phys_addr_t *nb, uint16_t leaf,
4272999097bSAvi Kivity                                 int level)
42892e873b9Sbellard {
429f7bf5461SAvi Kivity     PhysPageEntry *p;
430f7bf5461SAvi Kivity     int i;
43107f07b31SAvi Kivity     target_phys_addr_t step = (target_phys_addr_t)1 << (level * L2_BITS);
4325cd2c5b6SRichard Henderson 
43307f07b31SAvi Kivity     if (!lp->is_leaf && lp->ptr == PHYS_MAP_NODE_NIL) {
434c19e8800SAvi Kivity         lp->ptr = phys_map_node_alloc();
435c19e8800SAvi Kivity         p = phys_map_nodes[lp->ptr];
436f7bf5461SAvi Kivity         if (level == 0) {
437f7bf5461SAvi Kivity             for (i = 0; i < L2_SIZE; i++) {
43807f07b31SAvi Kivity                 p[i].is_leaf = 1;
439c19e8800SAvi Kivity                 p[i].ptr = phys_section_unassigned;
44067c4d23cSpbrook             }
44192e873b9Sbellard         }
442d6f2ea22SAvi Kivity     } else {
443c19e8800SAvi Kivity         p = phys_map_nodes[lp->ptr];
4444346ae3eSAvi Kivity     }
4452999097bSAvi Kivity     lp = &p[(*index >> (level * L2_BITS)) & (L2_SIZE - 1)];
446f7bf5461SAvi Kivity 
4472999097bSAvi Kivity     while (*nb && lp < &p[L2_SIZE]) {
44807f07b31SAvi Kivity         if ((*index & (step - 1)) == 0 && *nb >= step) {
44907f07b31SAvi Kivity             lp->is_leaf = true;
450c19e8800SAvi Kivity             lp->ptr = leaf;
45107f07b31SAvi Kivity             *index += step;
45207f07b31SAvi Kivity             *nb -= step;
453f7bf5461SAvi Kivity         } else {
4542999097bSAvi Kivity             phys_page_set_level(lp, index, nb, leaf, level - 1);
4552999097bSAvi Kivity         }
4562999097bSAvi Kivity         ++lp;
457f7bf5461SAvi Kivity     }
4584346ae3eSAvi Kivity }
4595cd2c5b6SRichard Henderson 
4602999097bSAvi Kivity static void phys_page_set(target_phys_addr_t index, target_phys_addr_t nb,
4612999097bSAvi Kivity                           uint16_t leaf)
462f7bf5461SAvi Kivity {
4632999097bSAvi Kivity     /* Wildly overreserve - it doesn't matter much. */
46407f07b31SAvi Kivity     phys_map_node_reserve(3 * P_L2_LEVELS);
465f7bf5461SAvi Kivity 
4662999097bSAvi Kivity     phys_page_set_level(&phys_map, &index, &nb, leaf, P_L2_LEVELS - 1);
46792e873b9Sbellard }
46892e873b9Sbellard 
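/* Sketch of what phys_page_set() does for a hypothetical call
   phys_page_set(0, 3 * L2_SIZE, leaf): at the level just above the
   leaves each entry covers step = L2_SIZE pages, so the test
   (*index & (step - 1)) == 0 && *nb >= step succeeds three times and
   three whole entries are pointed at 'leaf' without allocating any
   bottom-level tables beneath them.  Ranges that are not step-aligned
   recurse one level down instead, so large aligned regions stay cheap
   while arbitrary ranges remain expressible.  */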
4690cac1b66SBlue Swirl MemoryRegionSection *phys_page_find(target_phys_addr_t index)
47092e873b9Sbellard {
47131ab2b4aSAvi Kivity     PhysPageEntry lp = phys_map;
47231ab2b4aSAvi Kivity     PhysPageEntry *p;
47331ab2b4aSAvi Kivity     int i;
47431ab2b4aSAvi Kivity     uint16_t s_index = phys_section_unassigned;
475f1f6e3b8SAvi Kivity 
47607f07b31SAvi Kivity     for (i = P_L2_LEVELS - 1; i >= 0 && !lp.is_leaf; i--) {
477c19e8800SAvi Kivity         if (lp.ptr == PHYS_MAP_NODE_NIL) {
47831ab2b4aSAvi Kivity             goto not_found;
479f1f6e3b8SAvi Kivity         }
480c19e8800SAvi Kivity         p = phys_map_nodes[lp.ptr];
48131ab2b4aSAvi Kivity         lp = p[(index >> (i * L2_BITS)) & (L2_SIZE - 1)];
48231ab2b4aSAvi Kivity     }
48331ab2b4aSAvi Kivity 
484c19e8800SAvi Kivity     s_index = lp.ptr;
48531ab2b4aSAvi Kivity not_found:
486f3705d53SAvi Kivity     return &phys_sections[s_index];
487f3705d53SAvi Kivity }
488f3705d53SAvi Kivity 
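/* A lookup thus walks at most P_L2_LEVELS entries: start at phys_map
   and, at each level, either stop at a leaf (yielding a
   phys_sections[] index), hit PHYS_MAP_NODE_NIL (yielding
   phys_section_unassigned), or consume the next L2_BITS-sized slice
   of the index to pick a slot in phys_map_nodes[].  For instance, if
   P_L2_LEVELS were 3 with L2_BITS = 10, page index 0x12345 would be
   sliced top-down as 0x0, 0x48, 0x345.  */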
489e5548617SBlue Swirl bool memory_region_is_unassigned(MemoryRegion *mr)
490e5548617SBlue Swirl {
491e5548617SBlue Swirl     return mr != &io_mem_ram && mr != &io_mem_rom
492e5548617SBlue Swirl         && mr != &io_mem_notdirty && !mr->rom_device
493e5548617SBlue Swirl         && mr != &io_mem_watch;
494e5548617SBlue Swirl }
495e5548617SBlue Swirl 
496c8a706feSpbrook #define mmap_lock() do { } while(0)
497c8a706feSpbrook #define mmap_unlock() do { } while(0)
4989fa3e853Sbellard #endif
499fd6ce8f6Sbellard 
5004369415fSbellard #define DEFAULT_CODE_GEN_BUFFER_SIZE (32 * 1024 * 1024)
5014369415fSbellard 
5024369415fSbellard #if defined(CONFIG_USER_ONLY)
503ccbb4d44SStuart Brady /* Currently it is not recommended to allocate big chunks of data in
5044369415fSbellard    user mode. This will change when a dedicated libc is used. */
5054369415fSbellard #define USE_STATIC_CODE_GEN_BUFFER
5064369415fSbellard #endif
5074369415fSbellard 
5084369415fSbellard #ifdef USE_STATIC_CODE_GEN_BUFFER
509ebf50fb3SAurelien Jarno static uint8_t static_code_gen_buffer[DEFAULT_CODE_GEN_BUFFER_SIZE]
510ebf50fb3SAurelien Jarno                __attribute__((aligned (CODE_GEN_ALIGN)));
5114369415fSbellard #endif
5124369415fSbellard 
5138fcd3692Sblueswir1 static void code_gen_alloc(unsigned long tb_size)
51426a5f13bSbellard {
5154369415fSbellard #ifdef USE_STATIC_CODE_GEN_BUFFER
5164369415fSbellard     code_gen_buffer = static_code_gen_buffer;
5174369415fSbellard     code_gen_buffer_size = DEFAULT_CODE_GEN_BUFFER_SIZE;
5184369415fSbellard     map_exec(code_gen_buffer, code_gen_buffer_size);
5194369415fSbellard #else
52026a5f13bSbellard     code_gen_buffer_size = tb_size;
52126a5f13bSbellard     if (code_gen_buffer_size == 0) {
5224369415fSbellard #if defined(CONFIG_USER_ONLY)
5234369415fSbellard         code_gen_buffer_size = DEFAULT_CODE_GEN_BUFFER_SIZE;
5244369415fSbellard #else
525ccbb4d44SStuart Brady         /* XXX: needs adjustments */
52694a6b54fSpbrook         code_gen_buffer_size = (unsigned long)(ram_size / 4);
5274369415fSbellard #endif
52826a5f13bSbellard     }
52926a5f13bSbellard     if (code_gen_buffer_size < MIN_CODE_GEN_BUFFER_SIZE)
53026a5f13bSbellard         code_gen_buffer_size = MIN_CODE_GEN_BUFFER_SIZE;
53126a5f13bSbellard     /* The code gen buffer location may have constraints depending on
53226a5f13bSbellard        the host cpu and OS */
53326a5f13bSbellard #if defined(__linux__)
53426a5f13bSbellard     {
53526a5f13bSbellard         int flags;
536141ac468Sblueswir1         void *start = NULL;
537141ac468Sblueswir1 
53826a5f13bSbellard         flags = MAP_PRIVATE | MAP_ANONYMOUS;
53926a5f13bSbellard #if defined(__x86_64__)
54026a5f13bSbellard         flags |= MAP_32BIT;
54126a5f13bSbellard         /* Cannot map more than that */
54226a5f13bSbellard         if (code_gen_buffer_size > (800 * 1024 * 1024))
54326a5f13bSbellard             code_gen_buffer_size = (800 * 1024 * 1024);
544141ac468Sblueswir1 #elif defined(__sparc_v9__)
545141ac468Sblueswir1         /* Map the buffer below 2G, so we can use direct calls and branches */
546141ac468Sblueswir1         flags |= MAP_FIXED;
547141ac468Sblueswir1         start = (void *) 0x60000000UL;
548141ac468Sblueswir1         if (code_gen_buffer_size > (512 * 1024 * 1024))
549141ac468Sblueswir1             code_gen_buffer_size = (512 * 1024 * 1024);
5501cb0661eSbalrog #elif defined(__arm__)
5515c84bd90SAurelien Jarno         /* Keep the buffer no bigger than 16 MB so branches between blocks stay in range */
5521cb0661eSbalrog         if (code_gen_buffer_size > 16 * 1024 * 1024)
5531cb0661eSbalrog             code_gen_buffer_size = 16 * 1024 * 1024;
554eba0b893SRichard Henderson #elif defined(__s390x__)
555eba0b893SRichard Henderson         /* Map the buffer so that we can use direct calls and branches.  */
556eba0b893SRichard Henderson         /* We have a +- 4GB range on the branches; leave some slop.  */
557eba0b893SRichard Henderson         if (code_gen_buffer_size > (3ul * 1024 * 1024 * 1024)) {
558eba0b893SRichard Henderson             code_gen_buffer_size = 3ul * 1024 * 1024 * 1024;
559eba0b893SRichard Henderson         }
560eba0b893SRichard Henderson         start = (void *)0x90000000UL;
56126a5f13bSbellard #endif
562141ac468Sblueswir1         code_gen_buffer = mmap(start, code_gen_buffer_size,
56326a5f13bSbellard                                PROT_WRITE | PROT_READ | PROT_EXEC,
56426a5f13bSbellard                                flags, -1, 0);
56526a5f13bSbellard         if (code_gen_buffer == MAP_FAILED) {
56626a5f13bSbellard             fprintf(stderr, "Could not allocate dynamic translator buffer\n");
56726a5f13bSbellard             exit(1);
56826a5f13bSbellard         }
56926a5f13bSbellard     }
570cbb608a5SBrad #elif defined(__FreeBSD__) || defined(__FreeBSD_kernel__) \
5719f4b09a4STobias Nygren     || defined(__DragonFly__) || defined(__OpenBSD__) \
5729f4b09a4STobias Nygren     || defined(__NetBSD__)
57306e67a82Saliguori     {
57406e67a82Saliguori         int flags;
57506e67a82Saliguori         void *addr = NULL;
57606e67a82Saliguori         flags = MAP_PRIVATE | MAP_ANONYMOUS;
57706e67a82Saliguori #if defined(__x86_64__)
57806e67a82Saliguori         /* FreeBSD doesn't have MAP_32BIT, use MAP_FIXED and assume
57906e67a82Saliguori          * 0x40000000 is free */
58006e67a82Saliguori         flags |= MAP_FIXED;
58106e67a82Saliguori         addr = (void *)0x40000000;
58206e67a82Saliguori         /* Cannot map more than that */
58306e67a82Saliguori         if (code_gen_buffer_size > (800 * 1024 * 1024))
58406e67a82Saliguori             code_gen_buffer_size = (800 * 1024 * 1024);
5854cd31ad2SBlue Swirl #elif defined(__sparc_v9__)
5864cd31ad2SBlue Swirl         /* Map the buffer below 2G, so we can use direct calls and branches */
5874cd31ad2SBlue Swirl         flags |= MAP_FIXED;
5884cd31ad2SBlue Swirl         addr = (void *) 0x60000000UL;
5894cd31ad2SBlue Swirl         if (code_gen_buffer_size > (512 * 1024 * 1024)) {
5904cd31ad2SBlue Swirl             code_gen_buffer_size = (512 * 1024 * 1024);
5914cd31ad2SBlue Swirl         }
59206e67a82Saliguori #endif
59306e67a82Saliguori         code_gen_buffer = mmap(addr, code_gen_buffer_size,
59406e67a82Saliguori                                PROT_WRITE | PROT_READ | PROT_EXEC,
59506e67a82Saliguori                                flags, -1, 0);
59606e67a82Saliguori         if (code_gen_buffer == MAP_FAILED) {
59706e67a82Saliguori             fprintf(stderr, "Could not allocate dynamic translator buffer\n");
59806e67a82Saliguori             exit(1);
59906e67a82Saliguori         }
60006e67a82Saliguori     }
60126a5f13bSbellard #else
6027267c094SAnthony Liguori     code_gen_buffer = g_malloc(code_gen_buffer_size);
60326a5f13bSbellard     map_exec(code_gen_buffer, code_gen_buffer_size);
60426a5f13bSbellard #endif
6054369415fSbellard #endif /* !USE_STATIC_CODE_GEN_BUFFER */
60626a5f13bSbellard     map_exec(code_gen_prologue, sizeof(code_gen_prologue));
60726a5f13bSbellard     code_gen_buffer_max_size = code_gen_buffer_size -
608a884da8aSPeter Maydell         (TCG_MAX_OP_SIZE * OPC_BUF_SIZE);
60926a5f13bSbellard     code_gen_max_blocks = code_gen_buffer_size / CODE_GEN_AVG_BLOCK_SIZE;
6107267c094SAnthony Liguori     tbs = g_malloc(code_gen_max_blocks * sizeof(TranslationBlock));
61126a5f13bSbellard }
61226a5f13bSbellard 
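/* Rough sizing intuition for the two limits computed above; the real
   constants are target- and host-dependent, so these figures are only
   illustrative.  With the 32 MB DEFAULT_CODE_GEN_BUFFER_SIZE and an
   assumed CODE_GEN_AVG_BLOCK_SIZE of 128 bytes, code_gen_max_blocks
   comes to 32 MB / 128 = 262144 TB descriptors, while
   code_gen_buffer_max_size keeps enough slack at the end of the
   buffer for one worst-case TB, so tb_alloc() can refuse before the
   buffer overflows.  */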
61326a5f13bSbellard /* Must be called before using the QEMU cpus. 'tb_size' is the size
61426a5f13bSbellard    (in bytes) allocated to the translation buffer. Zero means default
61526a5f13bSbellard    size. */
616d5ab9713SJan Kiszka void tcg_exec_init(unsigned long tb_size)
61726a5f13bSbellard {
61826a5f13bSbellard     cpu_gen_init();
61926a5f13bSbellard     code_gen_alloc(tb_size);
62026a5f13bSbellard     code_gen_ptr = code_gen_buffer;
621813da627SRichard Henderson     tcg_register_jit(code_gen_buffer, code_gen_buffer_size);
6224369415fSbellard     page_init();
6239002ec79SRichard Henderson #if !defined(CONFIG_USER_ONLY) || !defined(CONFIG_USE_GUEST_BASE)
6249002ec79SRichard Henderson     /* There's no guest base to take into account, so go ahead and
6259002ec79SRichard Henderson        initialize the prologue now.  */
6269002ec79SRichard Henderson     tcg_prologue_init(&tcg_ctx);
6279002ec79SRichard Henderson #endif
62826a5f13bSbellard }
62926a5f13bSbellard 
630d5ab9713SJan Kiszka bool tcg_enabled(void)
631d5ab9713SJan Kiszka {
632d5ab9713SJan Kiszka     return code_gen_buffer != NULL;
633d5ab9713SJan Kiszka }
634d5ab9713SJan Kiszka 
635d5ab9713SJan Kiszka void cpu_exec_init_all(void)
636d5ab9713SJan Kiszka {
637d5ab9713SJan Kiszka #if !defined(CONFIG_USER_ONLY)
638d5ab9713SJan Kiszka     memory_map_init();
639d5ab9713SJan Kiszka     io_mem_init();
640d5ab9713SJan Kiszka #endif
641d5ab9713SJan Kiszka }
642d5ab9713SJan Kiszka 
6439656f324Spbrook #if defined(CPU_SAVE_VERSION) && !defined(CONFIG_USER_ONLY)
6449656f324Spbrook 
645e59fb374SJuan Quintela static int cpu_common_post_load(void *opaque, int version_id)
646e7f4eff7SJuan Quintela {
6479349b4f9SAndreas Färber     CPUArchState *env = opaque;
648e7f4eff7SJuan Quintela 
6493098dba0Saurel32     /* 0x01 was CPU_INTERRUPT_EXIT. This line can be removed when the
6503098dba0Saurel32        version_id is increased. */
6513098dba0Saurel32     env->interrupt_request &= ~0x01;
6529656f324Spbrook     tlb_flush(env, 1);
6539656f324Spbrook 
6549656f324Spbrook     return 0;
6559656f324Spbrook }
656e7f4eff7SJuan Quintela 
657e7f4eff7SJuan Quintela static const VMStateDescription vmstate_cpu_common = {
658e7f4eff7SJuan Quintela     .name = "cpu_common",
659e7f4eff7SJuan Quintela     .version_id = 1,
660e7f4eff7SJuan Quintela     .minimum_version_id = 1,
661e7f4eff7SJuan Quintela     .minimum_version_id_old = 1,
662e7f4eff7SJuan Quintela     .post_load = cpu_common_post_load,
663e7f4eff7SJuan Quintela     .fields      = (VMStateField []) {
6649349b4f9SAndreas Färber         VMSTATE_UINT32(halted, CPUArchState),
6659349b4f9SAndreas Färber         VMSTATE_UINT32(interrupt_request, CPUArchState),
666e7f4eff7SJuan Quintela         VMSTATE_END_OF_LIST()
667e7f4eff7SJuan Quintela     }
668e7f4eff7SJuan Quintela };
6699656f324Spbrook #endif
6709656f324Spbrook 
6719349b4f9SAndreas Färber CPUArchState *qemu_get_cpu(int cpu)
672950f1472SGlauber Costa {
6739349b4f9SAndreas Färber     CPUArchState *env = first_cpu;
674950f1472SGlauber Costa 
675950f1472SGlauber Costa     while (env) {
676950f1472SGlauber Costa         if (env->cpu_index == cpu)
677950f1472SGlauber Costa             break;
678950f1472SGlauber Costa         env = env->next_cpu;
679950f1472SGlauber Costa     }
680950f1472SGlauber Costa 
681950f1472SGlauber Costa     return env;
682950f1472SGlauber Costa }
683950f1472SGlauber Costa 
6849349b4f9SAndreas Färber void cpu_exec_init(CPUArchState *env)
685fd6ce8f6Sbellard {
6869349b4f9SAndreas Färber     CPUArchState **penv;
6876a00d601Sbellard     int cpu_index;
6886a00d601Sbellard 
689c2764719Spbrook #if defined(CONFIG_USER_ONLY)
690c2764719Spbrook     cpu_list_lock();
691c2764719Spbrook #endif
6926a00d601Sbellard     env->next_cpu = NULL;
6936a00d601Sbellard     penv = &first_cpu;
6946a00d601Sbellard     cpu_index = 0;
6956a00d601Sbellard     while (*penv != NULL) {
6961e9fa730SNathan Froyd         penv = &(*penv)->next_cpu;
6976a00d601Sbellard         cpu_index++;
6986a00d601Sbellard     }
6996a00d601Sbellard     env->cpu_index = cpu_index;
700268a362cSaliguori     env->numa_node = 0;
70172cf2d4fSBlue Swirl     QTAILQ_INIT(&env->breakpoints);
70272cf2d4fSBlue Swirl     QTAILQ_INIT(&env->watchpoints);
703dc7a09cfSJan Kiszka #ifndef CONFIG_USER_ONLY
704dc7a09cfSJan Kiszka     env->thread_id = qemu_get_thread_id();
705dc7a09cfSJan Kiszka #endif
7066a00d601Sbellard     *penv = env;
707c2764719Spbrook #if defined(CONFIG_USER_ONLY)
708c2764719Spbrook     cpu_list_unlock();
709c2764719Spbrook #endif
710b3c7724cSpbrook #if defined(CPU_SAVE_VERSION) && !defined(CONFIG_USER_ONLY)
7110be71e32SAlex Williamson     vmstate_register(NULL, cpu_index, &vmstate_cpu_common, env);
7120be71e32SAlex Williamson     register_savevm(NULL, "cpu", cpu_index, CPU_SAVE_VERSION,
713b3c7724cSpbrook                     cpu_save, cpu_load, env);
714b3c7724cSpbrook #endif
715fd6ce8f6Sbellard }
716fd6ce8f6Sbellard 
717d1a1eb74STristan Gingold /* Allocate a new translation block.  Returns NULL when there are too many
718d1a1eb74STristan Gingold    translation blocks or too much generated code; the caller must then flush. */
719d1a1eb74STristan Gingold static TranslationBlock *tb_alloc(target_ulong pc)
720d1a1eb74STristan Gingold {
721d1a1eb74STristan Gingold     TranslationBlock *tb;
722d1a1eb74STristan Gingold 
723d1a1eb74STristan Gingold     if (nb_tbs >= code_gen_max_blocks ||
724d1a1eb74STristan Gingold         (code_gen_ptr - code_gen_buffer) >= code_gen_buffer_max_size)
725d1a1eb74STristan Gingold         return NULL;
726d1a1eb74STristan Gingold     tb = &tbs[nb_tbs++];
727d1a1eb74STristan Gingold     tb->pc = pc;
728d1a1eb74STristan Gingold     tb->cflags = 0;
729d1a1eb74STristan Gingold     return tb;
730d1a1eb74STristan Gingold }
731d1a1eb74STristan Gingold 
732d1a1eb74STristan Gingold void tb_free(TranslationBlock *tb)
733d1a1eb74STristan Gingold {
734d1a1eb74STristan Gingold     /* In practice this is mostly used for single-use temporary TBs.
735d1a1eb74STristan Gingold        Ignore the hard cases and just back up if this TB happens to
736d1a1eb74STristan Gingold        be the last one generated.  */
737d1a1eb74STristan Gingold     if (nb_tbs > 0 && tb == &tbs[nb_tbs - 1]) {
738d1a1eb74STristan Gingold         code_gen_ptr = tb->tc_ptr;
739d1a1eb74STristan Gingold         nb_tbs--;
740d1a1eb74STristan Gingold     }
741d1a1eb74STristan Gingold }
742d1a1eb74STristan Gingold 
7439fa3e853Sbellard static inline void invalidate_page_bitmap(PageDesc *p)
7449fa3e853Sbellard {
7459fa3e853Sbellard     if (p->code_bitmap) {
7467267c094SAnthony Liguori         g_free(p->code_bitmap);
7479fa3e853Sbellard         p->code_bitmap = NULL;
7489fa3e853Sbellard     }
7499fa3e853Sbellard     p->code_write_count = 0;
7509fa3e853Sbellard }
7519fa3e853Sbellard 
7525cd2c5b6SRichard Henderson /* Set to NULL all the 'first_tb' fields in all PageDescs. */
7535cd2c5b6SRichard Henderson 
7545cd2c5b6SRichard Henderson static void page_flush_tb_1 (int level, void **lp)
7555cd2c5b6SRichard Henderson {
7565cd2c5b6SRichard Henderson     int i;
7575cd2c5b6SRichard Henderson 
7585cd2c5b6SRichard Henderson     if (*lp == NULL) {
7595cd2c5b6SRichard Henderson         return;
7605cd2c5b6SRichard Henderson     }
7615cd2c5b6SRichard Henderson     if (level == 0) {
7625cd2c5b6SRichard Henderson         PageDesc *pd = *lp;
7637296abacSPaul Brook         for (i = 0; i < L2_SIZE; ++i) {
7645cd2c5b6SRichard Henderson             pd[i].first_tb = NULL;
7655cd2c5b6SRichard Henderson             invalidate_page_bitmap(pd + i);
7665cd2c5b6SRichard Henderson         }
7675cd2c5b6SRichard Henderson     } else {
7685cd2c5b6SRichard Henderson         void **pp = *lp;
7697296abacSPaul Brook         for (i = 0; i < L2_SIZE; ++i) {
7705cd2c5b6SRichard Henderson             page_flush_tb_1 (level - 1, pp + i);
7715cd2c5b6SRichard Henderson         }
7725cd2c5b6SRichard Henderson     }
7735cd2c5b6SRichard Henderson }
7745cd2c5b6SRichard Henderson 
775fd6ce8f6Sbellard static void page_flush_tb(void)
776fd6ce8f6Sbellard {
7775cd2c5b6SRichard Henderson     int i;
7785cd2c5b6SRichard Henderson     for (i = 0; i < V_L1_SIZE; i++) {
7795cd2c5b6SRichard Henderson         page_flush_tb_1(V_L1_SHIFT / L2_BITS - 1, l1_map + i);
780fd6ce8f6Sbellard     }
781fd6ce8f6Sbellard }
782fd6ce8f6Sbellard 
783fd6ce8f6Sbellard /* flush all the translation blocks */
784d4e8164fSbellard /* XXX: tb_flush is currently not thread safe */
7859349b4f9SAndreas Färber void tb_flush(CPUArchState *env1)
786fd6ce8f6Sbellard {
7879349b4f9SAndreas Färber     CPUArchState *env;
7880124311eSbellard #if defined(DEBUG_FLUSH)
789ab3d1727Sblueswir1     printf("qemu: flush code_size=%ld nb_tbs=%d avg_tb_size=%ld\n",
790ab3d1727Sblueswir1            (unsigned long)(code_gen_ptr - code_gen_buffer),
791ab3d1727Sblueswir1            nb_tbs, nb_tbs > 0 ?
792ab3d1727Sblueswir1            ((unsigned long)(code_gen_ptr - code_gen_buffer)) / nb_tbs : 0);
793fd6ce8f6Sbellard #endif
79426a5f13bSbellard     if ((unsigned long)(code_gen_ptr - code_gen_buffer) > code_gen_buffer_size)
795a208e54aSpbrook         cpu_abort(env1, "Internal error: code buffer overflow\n");
796a208e54aSpbrook 
797fd6ce8f6Sbellard     nb_tbs = 0;
7986a00d601Sbellard 
7996a00d601Sbellard     for(env = first_cpu; env != NULL; env = env->next_cpu) {
8008a40a180Sbellard         memset (env->tb_jmp_cache, 0, TB_JMP_CACHE_SIZE * sizeof (void *));
8016a00d601Sbellard     }
8029fa3e853Sbellard 
8038a8a608fSbellard     memset (tb_phys_hash, 0, CODE_GEN_PHYS_HASH_SIZE * sizeof (void *));
804fd6ce8f6Sbellard     page_flush_tb();
8059fa3e853Sbellard 
806fd6ce8f6Sbellard     code_gen_ptr = code_gen_buffer;
807d4e8164fSbellard     /* XXX: flush processor icache at this point if cache flush is
808d4e8164fSbellard        expensive */
809e3db7226Sbellard     tb_flush_count++;
810fd6ce8f6Sbellard }
811fd6ce8f6Sbellard 
812fd6ce8f6Sbellard #ifdef DEBUG_TB_CHECK
813fd6ce8f6Sbellard 
814bc98a7efSj_mayer static void tb_invalidate_check(target_ulong address)
815fd6ce8f6Sbellard {
816fd6ce8f6Sbellard     TranslationBlock *tb;
817fd6ce8f6Sbellard     int i;
818fd6ce8f6Sbellard     address &= TARGET_PAGE_MASK;
81999773bd4Spbrook     for(i = 0;i < CODE_GEN_PHYS_HASH_SIZE; i++) {
82099773bd4Spbrook         for(tb = tb_phys_hash[i]; tb != NULL; tb = tb->phys_hash_next) {
821fd6ce8f6Sbellard             if (!(address + TARGET_PAGE_SIZE <= tb->pc ||
822fd6ce8f6Sbellard                   address >= tb->pc + tb->size)) {
8230bf9e31aSBlue Swirl                 printf("ERROR invalidate: address=" TARGET_FMT_lx
8240bf9e31aSBlue Swirl                        " PC=%08lx size=%04x\n",
82599773bd4Spbrook                        address, (long)tb->pc, tb->size);
826fd6ce8f6Sbellard             }
827fd6ce8f6Sbellard         }
828fd6ce8f6Sbellard     }
829fd6ce8f6Sbellard }
830fd6ce8f6Sbellard 
831fd6ce8f6Sbellard /* verify that all the pages have correct rights for code */
832fd6ce8f6Sbellard static void tb_page_check(void)
833fd6ce8f6Sbellard {
834fd6ce8f6Sbellard     TranslationBlock *tb;
835fd6ce8f6Sbellard     int i, flags1, flags2;
836fd6ce8f6Sbellard 
83799773bd4Spbrook     for(i = 0;i < CODE_GEN_PHYS_HASH_SIZE; i++) {
83899773bd4Spbrook         for(tb = tb_phys_hash[i]; tb != NULL; tb = tb->phys_hash_next) {
839fd6ce8f6Sbellard             flags1 = page_get_flags(tb->pc);
840fd6ce8f6Sbellard             flags2 = page_get_flags(tb->pc + tb->size - 1);
841fd6ce8f6Sbellard             if ((flags1 & PAGE_WRITE) || (flags2 & PAGE_WRITE)) {
842fd6ce8f6Sbellard                 printf("ERROR page flags: PC=%08lx size=%04x f1=%x f2=%x\n",
84399773bd4Spbrook                        (long)tb->pc, tb->size, flags1, flags2);
844fd6ce8f6Sbellard             }
845fd6ce8f6Sbellard         }
846fd6ce8f6Sbellard     }
847fd6ce8f6Sbellard }
848fd6ce8f6Sbellard 
849fd6ce8f6Sbellard #endif
850fd6ce8f6Sbellard 
851fd6ce8f6Sbellard /* invalidate one TB */
852fd6ce8f6Sbellard static inline void tb_remove(TranslationBlock **ptb, TranslationBlock *tb,
853fd6ce8f6Sbellard                              int next_offset)
854fd6ce8f6Sbellard {
855fd6ce8f6Sbellard     TranslationBlock *tb1;
856fd6ce8f6Sbellard     for(;;) {
857fd6ce8f6Sbellard         tb1 = *ptb;
858fd6ce8f6Sbellard         if (tb1 == tb) {
859fd6ce8f6Sbellard             *ptb = *(TranslationBlock **)((char *)tb1 + next_offset);
860fd6ce8f6Sbellard             break;
861fd6ce8f6Sbellard         }
862fd6ce8f6Sbellard         ptb = (TranslationBlock **)((char *)tb1 + next_offset);
863fd6ce8f6Sbellard     }
864fd6ce8f6Sbellard }
865fd6ce8f6Sbellard 
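/* tb_remove() is a generic singly-linked-list unlink: next_offset is
   the byte offset of the relevant next pointer inside the TB, so the
   same code serves any intrusive list a TB is on.  For example,
   tb_phys_invalidate() below passes
   offsetof(TranslationBlock, phys_hash_next) to unlink a TB from its
   physical-PC hash bucket.  */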
8669fa3e853Sbellard static inline void tb_page_remove(TranslationBlock **ptb, TranslationBlock *tb)
8679fa3e853Sbellard {
8689fa3e853Sbellard     TranslationBlock *tb1;
8699fa3e853Sbellard     unsigned int n1;
8709fa3e853Sbellard 
8719fa3e853Sbellard     for(;;) {
8729fa3e853Sbellard         tb1 = *ptb;
8738efe0ca8SStefan Weil         n1 = (uintptr_t)tb1 & 3;
8748efe0ca8SStefan Weil         tb1 = (TranslationBlock *)((uintptr_t)tb1 & ~3);
8759fa3e853Sbellard         if (tb1 == tb) {
8769fa3e853Sbellard             *ptb = tb1->page_next[n1];
8779fa3e853Sbellard             break;
8789fa3e853Sbellard         }
8799fa3e853Sbellard         ptb = &tb1->page_next[n1];
8809fa3e853Sbellard     }
8819fa3e853Sbellard }
8829fa3e853Sbellard 
883d4e8164fSbellard static inline void tb_jmp_remove(TranslationBlock *tb, int n)
884d4e8164fSbellard {
885d4e8164fSbellard     TranslationBlock *tb1, **ptb;
886d4e8164fSbellard     unsigned int n1;
887d4e8164fSbellard 
888d4e8164fSbellard     ptb = &tb->jmp_next[n];
889d4e8164fSbellard     tb1 = *ptb;
890d4e8164fSbellard     if (tb1) {
891d4e8164fSbellard         /* find tb(n) in circular list */
892d4e8164fSbellard         for(;;) {
893d4e8164fSbellard             tb1 = *ptb;
8948efe0ca8SStefan Weil             n1 = (uintptr_t)tb1 & 3;
8958efe0ca8SStefan Weil             tb1 = (TranslationBlock *)((uintptr_t)tb1 & ~3);
896d4e8164fSbellard             if (n1 == n && tb1 == tb)
897d4e8164fSbellard                 break;
898d4e8164fSbellard             if (n1 == 2) {
899d4e8164fSbellard                 ptb = &tb1->jmp_first;
900d4e8164fSbellard             } else {
901d4e8164fSbellard                 ptb = &tb1->jmp_next[n1];
902d4e8164fSbellard             }
903d4e8164fSbellard         }
904d4e8164fSbellard         /* now we can remove tb(n) from the list */
905d4e8164fSbellard         *ptb = tb->jmp_next[n];
906d4e8164fSbellard 
907d4e8164fSbellard         tb->jmp_next[n] = NULL;
908d4e8164fSbellard     }
909d4e8164fSbellard }
910d4e8164fSbellard 
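/* The page lists and jump lists above use tagged pointers: the low
   two bits of a stored pointer say which slot of the pointed-to TB
   continues the chain (0 or 1 for page_next[] / jmp_next[], 2 marking
   the head of the circular jump list), and the real TB address is
   recovered by masking them off:

       n1  = (uintptr_t)tb1 & 3;                        // slot number
       tb1 = (TranslationBlock *)((uintptr_t)tb1 & ~3); // real pointer

   This is why TranslationBlock storage must be at least 4-byte
   aligned.  */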
911d4e8164fSbellard /* reset the jump entry 'n' of a TB so that it is not chained to
912d4e8164fSbellard    another TB */
913d4e8164fSbellard static inline void tb_reset_jump(TranslationBlock *tb, int n)
914d4e8164fSbellard {
9158efe0ca8SStefan Weil     tb_set_jmp_target(tb, n, (uintptr_t)(tb->tc_ptr + tb->tb_next_offset[n]));
916d4e8164fSbellard }
917d4e8164fSbellard 
91841c1b1c9SPaul Brook void tb_phys_invalidate(TranslationBlock *tb, tb_page_addr_t page_addr)
919fd6ce8f6Sbellard {
9209349b4f9SAndreas Färber     CPUArchState *env;
921fd6ce8f6Sbellard     PageDesc *p;
9228a40a180Sbellard     unsigned int h, n1;
92341c1b1c9SPaul Brook     tb_page_addr_t phys_pc;
9248a40a180Sbellard     TranslationBlock *tb1, *tb2;
925fd6ce8f6Sbellard 
9269fa3e853Sbellard     /* remove the TB from the hash list */
9279fa3e853Sbellard     phys_pc = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
9289fa3e853Sbellard     h = tb_phys_hash_func(phys_pc);
9299fa3e853Sbellard     tb_remove(&tb_phys_hash[h], tb,
9309fa3e853Sbellard               offsetof(TranslationBlock, phys_hash_next));
9319fa3e853Sbellard 
9329fa3e853Sbellard     /* remove the TB from the page list */
9339fa3e853Sbellard     if (tb->page_addr[0] != page_addr) {
9349fa3e853Sbellard         p = page_find(tb->page_addr[0] >> TARGET_PAGE_BITS);
9359fa3e853Sbellard         tb_page_remove(&p->first_tb, tb);
9369fa3e853Sbellard         invalidate_page_bitmap(p);
9379fa3e853Sbellard     }
9389fa3e853Sbellard     if (tb->page_addr[1] != -1 && tb->page_addr[1] != page_addr) {
9399fa3e853Sbellard         p = page_find(tb->page_addr[1] >> TARGET_PAGE_BITS);
9409fa3e853Sbellard         tb_page_remove(&p->first_tb, tb);
9419fa3e853Sbellard         invalidate_page_bitmap(p);
9429fa3e853Sbellard     }
9439fa3e853Sbellard 
9448a40a180Sbellard     tb_invalidated_flag = 1;
9458a40a180Sbellard 
9468a40a180Sbellard     /* remove the TB from the hash list */
9478a40a180Sbellard     h = tb_jmp_cache_hash_func(tb->pc);
9486a00d601Sbellard     for(env = first_cpu; env != NULL; env = env->next_cpu) {
9496a00d601Sbellard         if (env->tb_jmp_cache[h] == tb)
9506a00d601Sbellard             env->tb_jmp_cache[h] = NULL;
9516a00d601Sbellard     }
9528a40a180Sbellard 
9538a40a180Sbellard     /* remove this TB from the two jump lists */
9548a40a180Sbellard     tb_jmp_remove(tb, 0);
9558a40a180Sbellard     tb_jmp_remove(tb, 1);
9568a40a180Sbellard 
9578a40a180Sbellard     /* remove any remaining jumps to this TB */
9588a40a180Sbellard     tb1 = tb->jmp_first;
9598a40a180Sbellard     for(;;) {
9608efe0ca8SStefan Weil         n1 = (uintptr_t)tb1 & 3;
9618a40a180Sbellard         if (n1 == 2)
9628a40a180Sbellard             break;
9638efe0ca8SStefan Weil         tb1 = (TranslationBlock *)((uintptr_t)tb1 & ~3);
9648a40a180Sbellard         tb2 = tb1->jmp_next[n1];
9658a40a180Sbellard         tb_reset_jump(tb1, n1);
9668a40a180Sbellard         tb1->jmp_next[n1] = NULL;
9678a40a180Sbellard         tb1 = tb2;
9688a40a180Sbellard     }
9698efe0ca8SStefan Weil     tb->jmp_first = (TranslationBlock *)((uintptr_t)tb | 2); /* fail safe */
9708a40a180Sbellard 
971e3db7226Sbellard     tb_phys_invalidate_count++;
9729fa3e853Sbellard }
9739fa3e853Sbellard 
9749fa3e853Sbellard static inline void set_bits(uint8_t *tab, int start, int len)
9759fa3e853Sbellard {
9769fa3e853Sbellard     int end, mask, end1;
9779fa3e853Sbellard 
9789fa3e853Sbellard     end = start + len;
9799fa3e853Sbellard     tab += start >> 3;
9809fa3e853Sbellard     mask = 0xff << (start & 7);
9819fa3e853Sbellard     if ((start & ~7) == (end & ~7)) {
9829fa3e853Sbellard         if (start < end) {
9839fa3e853Sbellard             mask &= ~(0xff << (end & 7));
9849fa3e853Sbellard             *tab |= mask;
9859fa3e853Sbellard         }
9869fa3e853Sbellard     } else {
9879fa3e853Sbellard         *tab++ |= mask;
9889fa3e853Sbellard         start = (start + 8) & ~7;
9899fa3e853Sbellard         end1 = end & ~7;
9909fa3e853Sbellard         while (start < end1) {
9919fa3e853Sbellard             *tab++ = 0xff;
9929fa3e853Sbellard             start += 8;
9939fa3e853Sbellard         }
9949fa3e853Sbellard         if (start < end) {
9959fa3e853Sbellard             mask = ~(0xff << (end & 7));
9969fa3e853Sbellard             *tab |= mask;
9979fa3e853Sbellard         }
9989fa3e853Sbellard     }
9999fa3e853Sbellard }
10009fa3e853Sbellard 
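/* Worked example for set_bits(): set_bits(tab, 3, 7) marks bits 3..9.
   start >> 3 = 0, so tab is not advanced; mask = 0xff << 3 = 0xf8.
   The start and end bytes differ (byte 0 vs. byte 1), so the else
   branch runs:

       tab[0] |= 0xf8;          // bits 3..7 of the first byte
       start = 8; end1 = 8;     // no full 0xff bytes in between
       tab[1] |= ~(0xff << 2);  // = 0x03, bits 0..1 of the second byte

   for a total of 7 bits set, as requested.  */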
10019fa3e853Sbellard static void build_page_bitmap(PageDesc *p)
10029fa3e853Sbellard {
10039fa3e853Sbellard     int n, tb_start, tb_end;
10049fa3e853Sbellard     TranslationBlock *tb;
10059fa3e853Sbellard 
10067267c094SAnthony Liguori     p->code_bitmap = g_malloc0(TARGET_PAGE_SIZE / 8);
10079fa3e853Sbellard 
10089fa3e853Sbellard     tb = p->first_tb;
10099fa3e853Sbellard     while (tb != NULL) {
10108efe0ca8SStefan Weil         n = (uintptr_t)tb & 3;
10118efe0ca8SStefan Weil         tb = (TranslationBlock *)((uintptr_t)tb & ~3);
10129fa3e853Sbellard         /* NOTE: this is subtle as a TB may span two physical pages */
10139fa3e853Sbellard         if (n == 0) {
10149fa3e853Sbellard             /* NOTE: tb_end may be after the end of the page, but
10159fa3e853Sbellard                it is not a problem */
10169fa3e853Sbellard             tb_start = tb->pc & ~TARGET_PAGE_MASK;
10179fa3e853Sbellard             tb_end = tb_start + tb->size;
10189fa3e853Sbellard             if (tb_end > TARGET_PAGE_SIZE)
10199fa3e853Sbellard                 tb_end = TARGET_PAGE_SIZE;
10209fa3e853Sbellard         } else {
10219fa3e853Sbellard             tb_start = 0;
10229fa3e853Sbellard             tb_end = ((tb->pc + tb->size) & ~TARGET_PAGE_MASK);
10239fa3e853Sbellard         }
10249fa3e853Sbellard         set_bits(p->code_bitmap, tb_start, tb_end - tb_start);
10259fa3e853Sbellard         tb = tb->page_next[n];
10269fa3e853Sbellard     }
10279fa3e853Sbellard }
10289fa3e853Sbellard 
10299349b4f9SAndreas Färber TranslationBlock *tb_gen_code(CPUArchState *env,
10302e70f6efSpbrook                               target_ulong pc, target_ulong cs_base,
10312e70f6efSpbrook                               int flags, int cflags)
1032d720b93dSbellard {
1033d720b93dSbellard     TranslationBlock *tb;
1034d720b93dSbellard     uint8_t *tc_ptr;
103541c1b1c9SPaul Brook     tb_page_addr_t phys_pc, phys_page2;
103641c1b1c9SPaul Brook     target_ulong virt_page2;
1037d720b93dSbellard     int code_gen_size;
1038d720b93dSbellard 
103941c1b1c9SPaul Brook     phys_pc = get_page_addr_code(env, pc);
1040c27004ecSbellard     tb = tb_alloc(pc);
1041d720b93dSbellard     if (!tb) {
1042d720b93dSbellard         /* flush must be done */
1043d720b93dSbellard         tb_flush(env);
1044d720b93dSbellard         /* cannot fail at this point */
1045c27004ecSbellard         tb = tb_alloc(pc);
10462e70f6efSpbrook         /* Don't forget to invalidate previous TB info.  */
10472e70f6efSpbrook         tb_invalidated_flag = 1;
1048d720b93dSbellard     }
1049d720b93dSbellard     tc_ptr = code_gen_ptr;
1050d720b93dSbellard     tb->tc_ptr = tc_ptr;
1051d720b93dSbellard     tb->cs_base = cs_base;
1052d720b93dSbellard     tb->flags = flags;
1053d720b93dSbellard     tb->cflags = cflags;
1054d07bde88Sblueswir1     cpu_gen_code(env, tb, &code_gen_size);
10558efe0ca8SStefan Weil     code_gen_ptr = (void *)(((uintptr_t)code_gen_ptr + code_gen_size +
10568efe0ca8SStefan Weil                              CODE_GEN_ALIGN - 1) & ~(CODE_GEN_ALIGN - 1));
1057d720b93dSbellard 
1058d720b93dSbellard     /* check next page if needed */
1059c27004ecSbellard     virt_page2 = (pc + tb->size - 1) & TARGET_PAGE_MASK;
1060d720b93dSbellard     phys_page2 = -1;
1061c27004ecSbellard     if ((pc & TARGET_PAGE_MASK) != virt_page2) {
106241c1b1c9SPaul Brook         phys_page2 = get_page_addr_code(env, virt_page2);
1063d720b93dSbellard     }
106441c1b1c9SPaul Brook     tb_link_page(tb, phys_pc, phys_page2);
10652e70f6efSpbrook     return tb;
1066d720b93dSbellard }
1067d720b93dSbellard 
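/* Cross-page TBs in tb_gen_code(), by example (assuming 4 KB target
   pages): a block translated from pc = 0x3ff8 that covers 0x10 guest
   bytes ends at 0x4007, so virt_page2 = 0x4007 & TARGET_PAGE_MASK =
   0x4000 differs from pc's page and phys_page2 is resolved as well;
   tb_link_page() then links the TB on *both* pages so that a write to
   either page invalidates it.  */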
106877a8f1a5SAlexander Graf /*
10698e0fdce3SJan Kiszka  * Invalidate all TBs which intersect with the target physical address range
10708e0fdce3SJan Kiszka  * [start;end[. NOTE: start and end may refer to *different* physical pages.
10718e0fdce3SJan Kiszka  * 'is_cpu_write_access' should be true if called from a real cpu write
10728e0fdce3SJan Kiszka  * access: the virtual CPU will exit the current TB if code is modified inside
10738e0fdce3SJan Kiszka  * this TB.
107477a8f1a5SAlexander Graf  */
107577a8f1a5SAlexander Graf void tb_invalidate_phys_range(tb_page_addr_t start, tb_page_addr_t end,
107677a8f1a5SAlexander Graf                               int is_cpu_write_access)
107777a8f1a5SAlexander Graf {
107877a8f1a5SAlexander Graf     while (start < end) {
107977a8f1a5SAlexander Graf         tb_invalidate_phys_page_range(start, end, is_cpu_write_access);
108077a8f1a5SAlexander Graf         start &= TARGET_PAGE_MASK;
108177a8f1a5SAlexander Graf         start += TARGET_PAGE_SIZE;
108277a8f1a5SAlexander Graf     }
108377a8f1a5SAlexander Graf }
108477a8f1a5SAlexander Graf 
10858e0fdce3SJan Kiszka /*
10868e0fdce3SJan Kiszka  * Invalidate all TBs which intersect with the target physical address range
10878e0fdce3SJan Kiszka  * [start;end[. NOTE: start and end must refer to the *same* physical page.
10888e0fdce3SJan Kiszka  * 'is_cpu_write_access' should be true if called from a real cpu write
10898e0fdce3SJan Kiszka  * access: the virtual CPU will exit the current TB if code is modified inside
10908e0fdce3SJan Kiszka  * this TB.
10918e0fdce3SJan Kiszka  */
109241c1b1c9SPaul Brook void tb_invalidate_phys_page_range(tb_page_addr_t start, tb_page_addr_t end,
1093d720b93dSbellard                                    int is_cpu_write_access)
10949fa3e853Sbellard {
10956b917547Saliguori     TranslationBlock *tb, *tb_next, *saved_tb;
10969349b4f9SAndreas Färber     CPUArchState *env = cpu_single_env;
109741c1b1c9SPaul Brook     tb_page_addr_t tb_start, tb_end;
10986b917547Saliguori     PageDesc *p;
10996b917547Saliguori     int n;
11006b917547Saliguori #ifdef TARGET_HAS_PRECISE_SMC
11016b917547Saliguori     int current_tb_not_found = is_cpu_write_access;
11026b917547Saliguori     TranslationBlock *current_tb = NULL;
11036b917547Saliguori     int current_tb_modified = 0;
11046b917547Saliguori     target_ulong current_pc = 0;
11056b917547Saliguori     target_ulong current_cs_base = 0;
11066b917547Saliguori     int current_flags = 0;
11076b917547Saliguori #endif /* TARGET_HAS_PRECISE_SMC */
11089fa3e853Sbellard 
11099fa3e853Sbellard     p = page_find(start >> TARGET_PAGE_BITS);
11109fa3e853Sbellard     if (!p)
11119fa3e853Sbellard         return;
11129fa3e853Sbellard     if (!p->code_bitmap &&
1113d720b93dSbellard         ++p->code_write_count >= SMC_BITMAP_USE_THRESHOLD &&
1114d720b93dSbellard         is_cpu_write_access) {
11159fa3e853Sbellard         /* build code bitmap */
11169fa3e853Sbellard         build_page_bitmap(p);
11179fa3e853Sbellard     }
11189fa3e853Sbellard 
11199fa3e853Sbellard     /* we remove all the TBs in the range [start, end[ */
11209fa3e853Sbellard     /* XXX: see if in some cases it could be faster to invalidate all the code */
11219fa3e853Sbellard     tb = p->first_tb;
11229fa3e853Sbellard     while (tb != NULL) {
11238efe0ca8SStefan Weil         n = (uintptr_t)tb & 3;
11248efe0ca8SStefan Weil         tb = (TranslationBlock *)((uintptr_t)tb & ~3);
11259fa3e853Sbellard         tb_next = tb->page_next[n];
11269fa3e853Sbellard         /* NOTE: this is subtle as a TB may span two physical pages */
11279fa3e853Sbellard         if (n == 0) {
11289fa3e853Sbellard             /* NOTE: tb_end may be after the end of the page, but
11299fa3e853Sbellard                it is not a problem */
11309fa3e853Sbellard             tb_start = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
11319fa3e853Sbellard             tb_end = tb_start + tb->size;
11329fa3e853Sbellard         } else {
11339fa3e853Sbellard             tb_start = tb->page_addr[1];
11349fa3e853Sbellard             tb_end = tb_start + ((tb->pc + tb->size) & ~TARGET_PAGE_MASK);
11359fa3e853Sbellard         }
11369fa3e853Sbellard         if (!(tb_end <= start || tb_start >= end)) {
1137d720b93dSbellard #ifdef TARGET_HAS_PRECISE_SMC
1138d720b93dSbellard             if (current_tb_not_found) {
1139d720b93dSbellard                 current_tb_not_found = 0;
1140d720b93dSbellard                 current_tb = NULL;
11412e70f6efSpbrook                 if (env->mem_io_pc) {
1142d720b93dSbellard                     /* now we have a real cpu fault */
11432e70f6efSpbrook                     current_tb = tb_find_pc(env->mem_io_pc);
1144d720b93dSbellard                 }
1145d720b93dSbellard             }
1146d720b93dSbellard             if (current_tb == tb &&
11472e70f6efSpbrook                 (current_tb->cflags & CF_COUNT_MASK) != 1) {
1148d720b93dSbellard             /* If we are modifying the current TB, we must stop
1149d720b93dSbellard                its execution. We could be more precise by checking
1150d720b93dSbellard                that the modification is after the current PC, but it
1151d720b93dSbellard                would require a specialized function to partially
1152d720b93dSbellard                restore the CPU state */
1153d720b93dSbellard 
1154d720b93dSbellard                 current_tb_modified = 1;
1155618ba8e6SStefan Weil                 cpu_restore_state(current_tb, env, env->mem_io_pc);
11566b917547Saliguori                 cpu_get_tb_cpu_state(env, &current_pc, &current_cs_base,
11576b917547Saliguori                                      &current_flags);
1158d720b93dSbellard             }
1159d720b93dSbellard #endif /* TARGET_HAS_PRECISE_SMC */
11606f5a9f7eSbellard             /* we need to do this to handle the case where a signal
11616f5a9f7eSbellard                occurs while doing tb_phys_invalidate() */
11626f5a9f7eSbellard             saved_tb = NULL;
11636f5a9f7eSbellard             if (env) {
1164ea1c1802Sbellard                 saved_tb = env->current_tb;
1165ea1c1802Sbellard                 env->current_tb = NULL;
11666f5a9f7eSbellard             }
11679fa3e853Sbellard             tb_phys_invalidate(tb, -1);
11686f5a9f7eSbellard             if (env) {
1169ea1c1802Sbellard                 env->current_tb = saved_tb;
1170ea1c1802Sbellard                 if (env->interrupt_request && env->current_tb)
1171ea1c1802Sbellard                     cpu_interrupt(env, env->interrupt_request);
11729fa3e853Sbellard             }
11736f5a9f7eSbellard         }
11749fa3e853Sbellard         tb = tb_next;
11759fa3e853Sbellard     }
11769fa3e853Sbellard #if !defined(CONFIG_USER_ONLY)
11779fa3e853Sbellard     /* if no code remaining, no need to continue to use slow writes */
11789fa3e853Sbellard     if (!p->first_tb) {
11799fa3e853Sbellard         invalidate_page_bitmap(p);
1180d720b93dSbellard         if (is_cpu_write_access) {
11812e70f6efSpbrook             tlb_unprotect_code_phys(env, start, env->mem_io_vaddr);
1182d720b93dSbellard         }
1183d720b93dSbellard     }
1184d720b93dSbellard #endif
1185d720b93dSbellard #ifdef TARGET_HAS_PRECISE_SMC
1186d720b93dSbellard     if (current_tb_modified) {
1187d720b93dSbellard         /* we generate a block containing just the instruction
1188d720b93dSbellard            modifying the memory, so that the new block cannot modify
1189d720b93dSbellard            itself */
1190ea1c1802Sbellard         env->current_tb = NULL;
11912e70f6efSpbrook         tb_gen_code(env, current_pc, current_cs_base, current_flags, 1);
1192d720b93dSbellard         cpu_resume_from_signal(env, NULL);
11939fa3e853Sbellard     }
11949fa3e853Sbellard #endif
11959fa3e853Sbellard }
11969fa3e853Sbellard 
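/* A minimal sketch, compiled out: p->first_tb and tb->page_next[] store a
   TranslationBlock pointer with the page index packed into the two low
   bits, which are free because TBs are sufficiently aligned.  The loop
   above unpacks with "& 3" / "& ~3"; hypothetical helpers: */
#if 0
static TranslationBlock *example_pack_tb(TranslationBlock *tb, unsigned n)
{
    /* n is 0 or 1 (the index of the page within the TB); elsewhere in
       this file the tag 2 marks the head of a jump list */
    return (TranslationBlock *)((uintptr_t)tb | n);
}

static TranslationBlock *example_unpack_tb(TranslationBlock *tagged,
                                           unsigned *n)
{
    *n = (uintptr_t)tagged & 3;
    return (TranslationBlock *)((uintptr_t)tagged & ~3);
}
#endif
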
11979fa3e853Sbellard /* len must be <= 8 and start must be a multiple of len */
119841c1b1c9SPaul Brook static inline void tb_invalidate_phys_page_fast(tb_page_addr_t start, int len)
11999fa3e853Sbellard {
12009fa3e853Sbellard     PageDesc *p;
12019fa3e853Sbellard     int offset, b;
120259817ccbSbellard #if 0
1203a4193c8aSbellard     if (1) {
120493fcfe39Saliguori         qemu_log("modifying code at 0x%x size=%d EIP=%x PC=%08x\n",
12052e70f6efSpbrook                   cpu_single_env->mem_io_vaddr, len,
1206a4193c8aSbellard                   cpu_single_env->eip,
12078efe0ca8SStefan Weil                   cpu_single_env->eip +
12088efe0ca8SStefan Weil                   (intptr_t)cpu_single_env->segs[R_CS].base);
1209a4193c8aSbellard     }
121059817ccbSbellard #endif
12119fa3e853Sbellard     p = page_find(start >> TARGET_PAGE_BITS);
12129fa3e853Sbellard     if (!p)
12139fa3e853Sbellard         return;
12149fa3e853Sbellard     if (p->code_bitmap) {
12159fa3e853Sbellard         offset = start & ~TARGET_PAGE_MASK;
12169fa3e853Sbellard         b = p->code_bitmap[offset >> 3] >> (offset & 7);
12179fa3e853Sbellard         if (b & ((1 << len) - 1))
12189fa3e853Sbellard             goto do_invalidate;
12199fa3e853Sbellard     } else {
12209fa3e853Sbellard     do_invalidate:
1221d720b93dSbellard         tb_invalidate_phys_page_range(start, start + len, 1);
12229fa3e853Sbellard     }
12239fa3e853Sbellard }
12249fa3e853Sbellard 
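/* A minimal sketch, compiled out: the code bitmap keeps one bit per byte
   of the page.  Because len <= 8 and start is a multiple of len, the len
   bits of interest never straddle a bitmap byte, so a single
   shift-and-mask answers "does any byte in [start;start+len[ hold
   translated code?".  Hypothetical restatement of the test above: */
#if 0
static int example_code_bitmap_test(const uint8_t *bitmap, int offset, int len)
{
    int b = bitmap[offset >> 3] >> (offset & 7);
    return b & ((1 << len) - 1);   /* non-zero: translated code present */
}
#endif
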
12259fa3e853Sbellard #if !defined(CONFIG_SOFTMMU)
122641c1b1c9SPaul Brook static void tb_invalidate_phys_page(tb_page_addr_t addr,
122720503968SBlue Swirl                                     uintptr_t pc, void *puc)
12289fa3e853Sbellard {
12296b917547Saliguori     TranslationBlock *tb;
12309fa3e853Sbellard     PageDesc *p;
12316b917547Saliguori     int n;
1232d720b93dSbellard #ifdef TARGET_HAS_PRECISE_SMC
12336b917547Saliguori     TranslationBlock *current_tb = NULL;
12349349b4f9SAndreas Färber     CPUArchState *env = cpu_single_env;
12356b917547Saliguori     int current_tb_modified = 0;
12366b917547Saliguori     target_ulong current_pc = 0;
12376b917547Saliguori     target_ulong current_cs_base = 0;
12386b917547Saliguori     int current_flags = 0;
1239d720b93dSbellard #endif
12409fa3e853Sbellard 
12419fa3e853Sbellard     addr &= TARGET_PAGE_MASK;
12429fa3e853Sbellard     p = page_find(addr >> TARGET_PAGE_BITS);
1243fd6ce8f6Sbellard     if (!p)
1244fd6ce8f6Sbellard         return;
1245fd6ce8f6Sbellard     tb = p->first_tb;
1246d720b93dSbellard #ifdef TARGET_HAS_PRECISE_SMC
1247d720b93dSbellard     if (tb && pc != 0) {
1248d720b93dSbellard         current_tb = tb_find_pc(pc);
1249d720b93dSbellard     }
1250d720b93dSbellard #endif
1251fd6ce8f6Sbellard     while (tb != NULL) {
12528efe0ca8SStefan Weil         n = (uintptr_t)tb & 3;
12538efe0ca8SStefan Weil         tb = (TranslationBlock *)((uintptr_t)tb & ~3);
1254d720b93dSbellard #ifdef TARGET_HAS_PRECISE_SMC
1255d720b93dSbellard         if (current_tb == tb &&
12562e70f6efSpbrook             (current_tb->cflags & CF_COUNT_MASK) != 1) {
1257d720b93dSbellard                 /* If we are modifying the current TB, we must stop
1258d720b93dSbellard                    its execution. We could be more precise by checking
1259d720b93dSbellard                    that the modification is after the current PC, but it
1260d720b93dSbellard                    would require a specialized function to partially
1261d720b93dSbellard                    restore the CPU state */
1262d720b93dSbellard 
1263d720b93dSbellard             current_tb_modified = 1;
1264618ba8e6SStefan Weil             cpu_restore_state(current_tb, env, pc);
12656b917547Saliguori             cpu_get_tb_cpu_state(env, &current_pc, &current_cs_base,
12666b917547Saliguori                                  &current_flags);
1267d720b93dSbellard         }
1268d720b93dSbellard #endif /* TARGET_HAS_PRECISE_SMC */
12699fa3e853Sbellard         tb_phys_invalidate(tb, addr);
12709fa3e853Sbellard         tb = tb->page_next[n];
1271fd6ce8f6Sbellard     }
1272fd6ce8f6Sbellard     p->first_tb = NULL;
1273d720b93dSbellard #ifdef TARGET_HAS_PRECISE_SMC
1274d720b93dSbellard     if (current_tb_modified) {
1275d720b93dSbellard         /* we generate a block containing just the instruction
1276d720b93dSbellard            modifying the memory, so that the new block cannot modify
1277d720b93dSbellard            itself */
1278ea1c1802Sbellard         env->current_tb = NULL;
12792e70f6efSpbrook         tb_gen_code(env, current_pc, current_cs_base, current_flags, 1);
1280d720b93dSbellard         cpu_resume_from_signal(env, puc);
1281d720b93dSbellard     }
1282d720b93dSbellard #endif
1283fd6ce8f6Sbellard }
12849fa3e853Sbellard #endif
1285fd6ce8f6Sbellard 
1286fd6ce8f6Sbellard /* add the tb in the target page and protect it if necessary */
12879fa3e853Sbellard static inline void tb_alloc_page(TranslationBlock *tb,
128841c1b1c9SPaul Brook                                  unsigned int n, tb_page_addr_t page_addr)
1289fd6ce8f6Sbellard {
1290fd6ce8f6Sbellard     PageDesc *p;
12914429ab44SJuan Quintela #ifndef CONFIG_USER_ONLY
12924429ab44SJuan Quintela     bool page_already_protected;
12934429ab44SJuan Quintela #endif
12949fa3e853Sbellard 
12959fa3e853Sbellard     tb->page_addr[n] = page_addr;
12965cd2c5b6SRichard Henderson     p = page_find_alloc(page_addr >> TARGET_PAGE_BITS, 1);
12979fa3e853Sbellard     tb->page_next[n] = p->first_tb;
12984429ab44SJuan Quintela #ifndef CONFIG_USER_ONLY
12994429ab44SJuan Quintela     page_already_protected = p->first_tb != NULL;
13004429ab44SJuan Quintela #endif
13018efe0ca8SStefan Weil     p->first_tb = (TranslationBlock *)((uintptr_t)tb | n);
13029fa3e853Sbellard     invalidate_page_bitmap(p);
13039fa3e853Sbellard 
1304107db443Sbellard #if defined(TARGET_HAS_SMC) || 1
1305d720b93dSbellard 
13069fa3e853Sbellard #if defined(CONFIG_USER_ONLY)
13079fa3e853Sbellard     if (p->flags & PAGE_WRITE) {
130853a5960aSpbrook         target_ulong addr;
130953a5960aSpbrook         PageDesc *p2;
1310fd6ce8f6Sbellard         int prot;
1311fd6ce8f6Sbellard 
1312fd6ce8f6Sbellard         /* force the host page to be non-writable (writes will have a
1313fd6ce8f6Sbellard            page fault + mprotect overhead) */
131453a5960aSpbrook         page_addr &= qemu_host_page_mask;
1315fd6ce8f6Sbellard         prot = 0;
131653a5960aSpbrook         for(addr = page_addr; addr < page_addr + qemu_host_page_size;
131753a5960aSpbrook             addr += TARGET_PAGE_SIZE) {
131853a5960aSpbrook 
131953a5960aSpbrook             p2 = page_find (addr >> TARGET_PAGE_BITS);
132053a5960aSpbrook             if (!p2)
132153a5960aSpbrook                 continue;
132253a5960aSpbrook             prot |= p2->flags;
132353a5960aSpbrook             p2->flags &= ~PAGE_WRITE;
132453a5960aSpbrook         }
132553a5960aSpbrook         mprotect(g2h(page_addr), qemu_host_page_size,
1326fd6ce8f6Sbellard                  (prot & PAGE_BITS) & ~PAGE_WRITE);
1327fd6ce8f6Sbellard #ifdef DEBUG_TB_INVALIDATE
1328ab3d1727Sblueswir1         printf("protecting code page: 0x" TARGET_FMT_lx "\n",
132953a5960aSpbrook                page_addr);
1330fd6ce8f6Sbellard #endif
1331fd6ce8f6Sbellard     }
13329fa3e853Sbellard #else
13339fa3e853Sbellard     /* if some code is already present, then the pages are already
13349fa3e853Sbellard        protected. So we handle the case where only the first TB is
13359fa3e853Sbellard        allocated in a physical page */
13364429ab44SJuan Quintela     if (!page_already_protected) {
13376a00d601Sbellard         tlb_protect_code(page_addr);
13389fa3e853Sbellard     }
13399fa3e853Sbellard #endif
1340d720b93dSbellard 
1341d720b93dSbellard #endif /* TARGET_HAS_SMC */
1342fd6ce8f6Sbellard }
1343fd6ce8f6Sbellard 
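/* A minimal sketch, compiled out: with host pages larger than target pages
   (say 64 KiB versus 4 KiB), the single mprotect() in tb_alloc_page()
   covers several target pages, so the protection applied must be the union
   of all their flags with PAGE_WRITE removed.  Hypothetical helper
   mirroring the loop above: */
#if 0
static int example_merged_host_page_prot(target_ulong page_addr)
{
    target_ulong addr, host_start = page_addr & qemu_host_page_mask;
    int prot = 0;

    for (addr = host_start; addr < host_start + qemu_host_page_size;
         addr += TARGET_PAGE_SIZE) {
        PageDesc *p2 = page_find(addr >> TARGET_PAGE_BITS);
        if (p2) {
            prot |= p2->flags;
        }
    }
    return (prot & PAGE_BITS) & ~PAGE_WRITE;
}
#endif
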
13449fa3e853Sbellard /* add a new TB and link it to the physical page tables. phys_page2 is
13459fa3e853Sbellard    (-1) to indicate that only one page contains the TB. */
134641c1b1c9SPaul Brook void tb_link_page(TranslationBlock *tb,
134741c1b1c9SPaul Brook                   tb_page_addr_t phys_pc, tb_page_addr_t phys_page2)
1348d4e8164fSbellard {
13499fa3e853Sbellard     unsigned int h;
13509fa3e853Sbellard     TranslationBlock **ptb;
13519fa3e853Sbellard 
1352c8a706feSpbrook     /* Grab the mmap lock to stop another thread invalidating this TB
1353c8a706feSpbrook        before we are done.  */
1354c8a706feSpbrook     mmap_lock();
13559fa3e853Sbellard     /* add in the physical hash table */
13569fa3e853Sbellard     h = tb_phys_hash_func(phys_pc);
13579fa3e853Sbellard     ptb = &tb_phys_hash[h];
13589fa3e853Sbellard     tb->phys_hash_next = *ptb;
13599fa3e853Sbellard     *ptb = tb;
1360fd6ce8f6Sbellard 
1361fd6ce8f6Sbellard     /* add in the page list */
13629fa3e853Sbellard     tb_alloc_page(tb, 0, phys_pc & TARGET_PAGE_MASK);
13639fa3e853Sbellard     if (phys_page2 != -1)
13649fa3e853Sbellard         tb_alloc_page(tb, 1, phys_page2);
13659fa3e853Sbellard     else
13669fa3e853Sbellard         tb->page_addr[1] = -1;
13679fa3e853Sbellard 
13688efe0ca8SStefan Weil     tb->jmp_first = (TranslationBlock *)((uintptr_t)tb | 2);
1369d4e8164fSbellard     tb->jmp_next[0] = NULL;
1370d4e8164fSbellard     tb->jmp_next[1] = NULL;
1371d4e8164fSbellard 
1372d4e8164fSbellard     /* init original jump addresses */
1373d4e8164fSbellard     if (tb->tb_next_offset[0] != 0xffff)
1374d4e8164fSbellard         tb_reset_jump(tb, 0);
1375d4e8164fSbellard     if (tb->tb_next_offset[1] != 0xffff)
1376d4e8164fSbellard         tb_reset_jump(tb, 1);
13778a40a180Sbellard 
13788a40a180Sbellard #ifdef DEBUG_TB_CHECK
13798a40a180Sbellard     tb_page_check();
13808a40a180Sbellard #endif
1381c8a706feSpbrook     mmap_unlock();
1382fd6ce8f6Sbellard }
1383fd6ce8f6Sbellard 
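/* A minimal sketch, compiled out: the tb_phys_hash insertion above is a
   plain head push onto a singly linked bucket, O(1); lookups walk
   phys_hash_next until a matching TB is found.  Hypothetical helper: */
#if 0
static void example_hash_bucket_push(TranslationBlock **bucket,
                                     TranslationBlock *tb)
{
    tb->phys_hash_next = *bucket;   /* old head becomes our successor */
    *bucket = tb;                   /* new TB becomes the head */
}
#endif
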
1384a513fe19Sbellard /* find the TB 'tb' such that tb->tc_ptr <= tc_ptr <
1385a513fe19Sbellard    (tb + 1)->tc_ptr. Return NULL if not found */
13866375e09eSStefan Weil TranslationBlock *tb_find_pc(uintptr_t tc_ptr)
1387a513fe19Sbellard {
1388a513fe19Sbellard     int m_min, m_max, m;
13898efe0ca8SStefan Weil     uintptr_t v;
1390a513fe19Sbellard     TranslationBlock *tb;
1391a513fe19Sbellard 
1392a513fe19Sbellard     if (nb_tbs <= 0)
1393a513fe19Sbellard         return NULL;
13948efe0ca8SStefan Weil     if (tc_ptr < (uintptr_t)code_gen_buffer ||
13958efe0ca8SStefan Weil         tc_ptr >= (uintptr_t)code_gen_ptr) {
1396a513fe19Sbellard         return NULL;
13978efe0ca8SStefan Weil     }
1398a513fe19Sbellard     /* binary search (cf Knuth) */
1399a513fe19Sbellard     m_min = 0;
1400a513fe19Sbellard     m_max = nb_tbs - 1;
1401a513fe19Sbellard     while (m_min <= m_max) {
1402a513fe19Sbellard         m = (m_min + m_max) >> 1;
1403a513fe19Sbellard         tb = &tbs[m];
14048efe0ca8SStefan Weil         v = (uintptr_t)tb->tc_ptr;
1405a513fe19Sbellard         if (v == tc_ptr)
1406a513fe19Sbellard             return tb;
1407a513fe19Sbellard         else if (tc_ptr < v) {
1408a513fe19Sbellard             m_max = m - 1;
1409a513fe19Sbellard         } else {
1410a513fe19Sbellard             m_min = m + 1;
1411a513fe19Sbellard         }
1412a513fe19Sbellard     }
1413a513fe19Sbellard     return &tbs[m_max];
1414a513fe19Sbellard }
14157501267eSbellard 
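/* A minimal standalone restatement, compiled out: tbs[] is sorted by
   tc_ptr because translated code is carved sequentially out of
   code_gen_buffer, so the search in tb_find_pc() above is a "floor"
   lookup: the greatest element <= the key.  The range check on entry is
   what makes returning tbs[m_max] on fall-through safe.  Hypothetical
   array version: */
#if 0
static int example_floor_search(const uintptr_t *arr, int n, uintptr_t key)
{
    int lo = 0, hi = n - 1;

    while (lo <= hi) {
        int mid = (lo + hi) >> 1;
        if (arr[mid] == key) {
            return mid;
        } else if (key < arr[mid]) {
            hi = mid - 1;
        } else {
            lo = mid + 1;
        }
    }
    return hi;   /* -1 if key < arr[0]; callers must rule that out */
}
#endif
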
1416ea041c0eSbellard static void tb_reset_jump_recursive(TranslationBlock *tb);
1417ea041c0eSbellard 
1418ea041c0eSbellard static inline void tb_reset_jump_recursive2(TranslationBlock *tb, int n)
1419ea041c0eSbellard {
1420ea041c0eSbellard     TranslationBlock *tb1, *tb_next, **ptb;
1421ea041c0eSbellard     unsigned int n1;
1422ea041c0eSbellard 
1423ea041c0eSbellard     tb1 = tb->jmp_next[n];
1424ea041c0eSbellard     if (tb1 != NULL) {
1425ea041c0eSbellard         /* find head of list */
1426ea041c0eSbellard         for(;;) {
14278efe0ca8SStefan Weil             n1 = (uintptr_t)tb1 & 3;
14288efe0ca8SStefan Weil             tb1 = (TranslationBlock *)((uintptr_t)tb1 & ~3);
1429ea041c0eSbellard             if (n1 == 2)
1430ea041c0eSbellard                 break;
1431ea041c0eSbellard             tb1 = tb1->jmp_next[n1];
1432ea041c0eSbellard         }
1433ea041c0eSbellard         /* we are now sure that tb jumps to tb1 */
1434ea041c0eSbellard         tb_next = tb1;
1435ea041c0eSbellard 
1436ea041c0eSbellard         /* remove tb from the jmp_first list */
1437ea041c0eSbellard         ptb = &tb_next->jmp_first;
1438ea041c0eSbellard         for(;;) {
1439ea041c0eSbellard             tb1 = *ptb;
14408efe0ca8SStefan Weil             n1 = (uintptr_t)tb1 & 3;
14418efe0ca8SStefan Weil             tb1 = (TranslationBlock *)((uintptr_t)tb1 & ~3);
1442ea041c0eSbellard             if (n1 == n && tb1 == tb)
1443ea041c0eSbellard                 break;
1444ea041c0eSbellard             ptb = &tb1->jmp_next[n1];
1445ea041c0eSbellard         }
1446ea041c0eSbellard         *ptb = tb->jmp_next[n];
1447ea041c0eSbellard         tb->jmp_next[n] = NULL;
1448ea041c0eSbellard 
1449ea041c0eSbellard         /* suppress the jump to next tb in generated code */
1450ea041c0eSbellard         tb_reset_jump(tb, n);
1451ea041c0eSbellard 
14520124311eSbellard         /* suppress jumps in the tb to which we could have jumped */
1453ea041c0eSbellard         tb_reset_jump_recursive(tb_next);
1454ea041c0eSbellard     }
1455ea041c0eSbellard }
1456ea041c0eSbellard 
1457ea041c0eSbellard static void tb_reset_jump_recursive(TranslationBlock *tb)
1458ea041c0eSbellard {
1459ea041c0eSbellard     tb_reset_jump_recursive2(tb, 0);
1460ea041c0eSbellard     tb_reset_jump_recursive2(tb, 1);
1461ea041c0eSbellard }
1462ea041c0eSbellard 
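/* A minimal sketch, compiled out: each TB that is a jump target owns a
   circular list threaded through jmp_next[]; entries carry the jump slot
   (0 or 1) in their two low bits, and the owning TB's own entry is tagged
   2, which terminates traversal.  Hypothetical helper mirroring the loops
   above: */
#if 0
static TranslationBlock *example_jmp_list_owner(TranslationBlock *tb1)
{
    for (;;) {
        unsigned n1 = (uintptr_t)tb1 & 3;
        tb1 = (TranslationBlock *)((uintptr_t)tb1 & ~3);
        if (n1 == 2) {
            return tb1;   /* reached the head the list circles back to */
        }
        tb1 = tb1->jmp_next[n1];
    }
}
#endif
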
14631fddef4bSbellard #if defined(TARGET_HAS_ICE)
146494df27fdSPaul Brook #if defined(CONFIG_USER_ONLY)
14659349b4f9SAndreas Färber static void breakpoint_invalidate(CPUArchState *env, target_ulong pc)
146694df27fdSPaul Brook {
146794df27fdSPaul Brook     tb_invalidate_phys_page_range(pc, pc + 1, 0);
146894df27fdSPaul Brook }
146994df27fdSPaul Brook #else
14701e7855a5SMax Filippov void tb_invalidate_phys_addr(target_phys_addr_t addr)
1471d720b93dSbellard {
1472c227f099SAnthony Liguori     ram_addr_t ram_addr;
1473f3705d53SAvi Kivity     MemoryRegionSection *section;
1474d720b93dSbellard 
147506ef3525SAvi Kivity     section = phys_page_find(addr >> TARGET_PAGE_BITS);
1476f3705d53SAvi Kivity     if (!(memory_region_is_ram(section->mr)
1477f3705d53SAvi Kivity           || (section->mr->rom_device && section->mr->readable))) {
147806ef3525SAvi Kivity         return;
147906ef3525SAvi Kivity     }
1480f3705d53SAvi Kivity     ram_addr = (memory_region_get_ram_addr(section->mr) & TARGET_PAGE_MASK)
1481cc5bea60SBlue Swirl         + memory_region_section_addr(section, addr);
1482706cd4b5Spbrook     tb_invalidate_phys_page_range(ram_addr, ram_addr + 1, 0);
1483d720b93dSbellard }
14841e7855a5SMax Filippov 
14851e7855a5SMax Filippov static void breakpoint_invalidate(CPUArchState *env, target_ulong pc)
14861e7855a5SMax Filippov {
14879d70c4b7SMax Filippov     tb_invalidate_phys_addr(cpu_get_phys_page_debug(env, pc) |
14889d70c4b7SMax Filippov             (pc & ~TARGET_PAGE_MASK));
14891e7855a5SMax Filippov }
1490c27004ecSbellard #endif
149194df27fdSPaul Brook #endif /* TARGET_HAS_ICE */
1492d720b93dSbellard 
1493c527ee8fSPaul Brook #if defined(CONFIG_USER_ONLY)
14949349b4f9SAndreas Färber void cpu_watchpoint_remove_all(CPUArchState *env, int mask)
1496c527ee8fSPaul Brook {
1497c527ee8fSPaul Brook }
1498c527ee8fSPaul Brook 
14999349b4f9SAndreas Färber int cpu_watchpoint_insert(CPUArchState *env, target_ulong addr, target_ulong len,
1500c527ee8fSPaul Brook                           int flags, CPUWatchpoint **watchpoint)
1501c527ee8fSPaul Brook {
1502c527ee8fSPaul Brook     return -ENOSYS;
1503c527ee8fSPaul Brook }
1504c527ee8fSPaul Brook #else
15056658ffb8Spbrook /* Add a watchpoint.  */
15069349b4f9SAndreas Färber int cpu_watchpoint_insert(CPUArchState *env, target_ulong addr, target_ulong len,
1507a1d1bb31Saliguori                           int flags, CPUWatchpoint **watchpoint)
15086658ffb8Spbrook {
1509b4051334Saliguori     target_ulong len_mask = ~(len - 1);
1510c0ce998eSaliguori     CPUWatchpoint *wp;
15116658ffb8Spbrook 
1512b4051334Saliguori     /* sanity checks: allow power-of-2 lengths, deny unaligned watchpoints */
15130dc23828SMax Filippov     if ((len & (len - 1)) || (addr & ~len_mask) ||
15140dc23828SMax Filippov             len == 0 || len > TARGET_PAGE_SIZE) {
1515b4051334Saliguori         fprintf(stderr, "qemu: tried to set invalid watchpoint at "
1516b4051334Saliguori                 TARGET_FMT_lx ", len=" TARGET_FMT_lu "\n", addr, len);
1517b4051334Saliguori         return -EINVAL;
1518b4051334Saliguori     }
15197267c094SAnthony Liguori     wp = g_malloc(sizeof(*wp));
15206658ffb8Spbrook 
1521a1d1bb31Saliguori     wp->vaddr = addr;
1522b4051334Saliguori     wp->len_mask = len_mask;
1523a1d1bb31Saliguori     wp->flags = flags;
1524a1d1bb31Saliguori 
15252dc9f411Saliguori     /* keep all GDB-injected watchpoints in front */
1526c0ce998eSaliguori     if (flags & BP_GDB)
152772cf2d4fSBlue Swirl         QTAILQ_INSERT_HEAD(&env->watchpoints, wp, entry);
1528c0ce998eSaliguori     else
152972cf2d4fSBlue Swirl         QTAILQ_INSERT_TAIL(&env->watchpoints, wp, entry);
1530a1d1bb31Saliguori 
15316658ffb8Spbrook     tlb_flush_page(env, addr);
1532a1d1bb31Saliguori 
1533a1d1bb31Saliguori     if (watchpoint)
1534a1d1bb31Saliguori         *watchpoint = wp;
1535a1d1bb31Saliguori     return 0;
15366658ffb8Spbrook }
15376658ffb8Spbrook 
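/* A minimal restatement, compiled out, of the sanity check above:
   (len & (len - 1)) == 0 is the usual power-of-two test, and with
   len_mask = ~(len - 1), (addr & ~len_mask) == 0 requires addr to be
   aligned to its own length.  Hypothetical helper: */
#if 0
static bool example_watchpoint_args_ok(target_ulong addr, target_ulong len)
{
    target_ulong len_mask = ~(len - 1);

    return len != 0 && len <= TARGET_PAGE_SIZE
        && (len & (len - 1)) == 0      /* power of two */
        && (addr & ~len_mask) == 0;    /* aligned to its own length */
}
#endif
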
1538a1d1bb31Saliguori /* Remove a specific watchpoint.  */
15399349b4f9SAndreas Färber int cpu_watchpoint_remove(CPUArchState *env, target_ulong addr, target_ulong len,
1540a1d1bb31Saliguori                           int flags)
15416658ffb8Spbrook {
1542b4051334Saliguori     target_ulong len_mask = ~(len - 1);
1543a1d1bb31Saliguori     CPUWatchpoint *wp;
15446658ffb8Spbrook 
154572cf2d4fSBlue Swirl     QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
1546b4051334Saliguori         if (addr == wp->vaddr && len_mask == wp->len_mask
15476e140f28Saliguori                 && flags == (wp->flags & ~BP_WATCHPOINT_HIT)) {
1548a1d1bb31Saliguori             cpu_watchpoint_remove_by_ref(env, wp);
15496658ffb8Spbrook             return 0;
15506658ffb8Spbrook         }
15516658ffb8Spbrook     }
1552a1d1bb31Saliguori     return -ENOENT;
15536658ffb8Spbrook }
15546658ffb8Spbrook 
1555a1d1bb31Saliguori /* Remove a specific watchpoint by reference.  */
15569349b4f9SAndreas Färber void cpu_watchpoint_remove_by_ref(CPUArchState *env, CPUWatchpoint *watchpoint)
1557a1d1bb31Saliguori {
155872cf2d4fSBlue Swirl     QTAILQ_REMOVE(&env->watchpoints, watchpoint, entry);
15597d03f82fSedgar_igl 
1560a1d1bb31Saliguori     tlb_flush_page(env, watchpoint->vaddr);
1561a1d1bb31Saliguori 
15627267c094SAnthony Liguori     g_free(watchpoint);
15637d03f82fSedgar_igl }
15647d03f82fSedgar_igl 
1565a1d1bb31Saliguori /* Remove all matching watchpoints.  */
15669349b4f9SAndreas Färber void cpu_watchpoint_remove_all(CPUArchState *env, int mask)
1567a1d1bb31Saliguori {
1568c0ce998eSaliguori     CPUWatchpoint *wp, *next;
1569a1d1bb31Saliguori 
157072cf2d4fSBlue Swirl     QTAILQ_FOREACH_SAFE(wp, &env->watchpoints, entry, next) {
1571a1d1bb31Saliguori         if (wp->flags & mask)
1572a1d1bb31Saliguori             cpu_watchpoint_remove_by_ref(env, wp);
1573a1d1bb31Saliguori     }
1574c0ce998eSaliguori }
1575c527ee8fSPaul Brook #endif
1576a1d1bb31Saliguori 
1577a1d1bb31Saliguori /* Add a breakpoint.  */
15789349b4f9SAndreas Färber int cpu_breakpoint_insert(CPUArchState *env, target_ulong pc, int flags,
1579a1d1bb31Saliguori                           CPUBreakpoint **breakpoint)
15804c3a88a2Sbellard {
15811fddef4bSbellard #if defined(TARGET_HAS_ICE)
1582c0ce998eSaliguori     CPUBreakpoint *bp;
15834c3a88a2Sbellard 
15847267c094SAnthony Liguori     bp = g_malloc(sizeof(*bp));
15854c3a88a2Sbellard 
1586a1d1bb31Saliguori     bp->pc = pc;
1587a1d1bb31Saliguori     bp->flags = flags;
1588a1d1bb31Saliguori 
15892dc9f411Saliguori     /* keep all GDB-injected breakpoints in front */
1590c0ce998eSaliguori     if (flags & BP_GDB)
159172cf2d4fSBlue Swirl         QTAILQ_INSERT_HEAD(&env->breakpoints, bp, entry);
1592c0ce998eSaliguori     else
159372cf2d4fSBlue Swirl         QTAILQ_INSERT_TAIL(&env->breakpoints, bp, entry);
1594d720b93dSbellard 
1595d720b93dSbellard     breakpoint_invalidate(env, pc);
1596a1d1bb31Saliguori 
1597a1d1bb31Saliguori     if (breakpoint)
1598a1d1bb31Saliguori         *breakpoint = bp;
15994c3a88a2Sbellard     return 0;
16004c3a88a2Sbellard #else
1601a1d1bb31Saliguori     return -ENOSYS;
16024c3a88a2Sbellard #endif
16034c3a88a2Sbellard }
16044c3a88a2Sbellard 
1605a1d1bb31Saliguori /* Remove a specific breakpoint.  */
16069349b4f9SAndreas Färber int cpu_breakpoint_remove(CPUArchState *env, target_ulong pc, int flags)
1607a1d1bb31Saliguori {
16087d03f82fSedgar_igl #if defined(TARGET_HAS_ICE)
1609a1d1bb31Saliguori     CPUBreakpoint *bp;
1610a1d1bb31Saliguori 
161172cf2d4fSBlue Swirl     QTAILQ_FOREACH(bp, &env->breakpoints, entry) {
1612a1d1bb31Saliguori         if (bp->pc == pc && bp->flags == flags) {
1613a1d1bb31Saliguori             cpu_breakpoint_remove_by_ref(env, bp);
1614a1d1bb31Saliguori             return 0;
16157d03f82fSedgar_igl         }
1616a1d1bb31Saliguori     }
1617a1d1bb31Saliguori     return -ENOENT;
1618a1d1bb31Saliguori #else
1619a1d1bb31Saliguori     return -ENOSYS;
16207d03f82fSedgar_igl #endif
16217d03f82fSedgar_igl }
16227d03f82fSedgar_igl 
1623a1d1bb31Saliguori /* Remove a specific breakpoint by reference.  */
16249349b4f9SAndreas Färber void cpu_breakpoint_remove_by_ref(CPUArchState *env, CPUBreakpoint *breakpoint)
16254c3a88a2Sbellard {
16261fddef4bSbellard #if defined(TARGET_HAS_ICE)
162772cf2d4fSBlue Swirl     QTAILQ_REMOVE(&env->breakpoints, breakpoint, entry);
1628d720b93dSbellard 
1629a1d1bb31Saliguori     breakpoint_invalidate(env, breakpoint->pc);
1630a1d1bb31Saliguori 
16317267c094SAnthony Liguori     g_free(breakpoint);
1632a1d1bb31Saliguori #endif
1633a1d1bb31Saliguori }
1634a1d1bb31Saliguori 
1635a1d1bb31Saliguori /* Remove all matching breakpoints. */
16369349b4f9SAndreas Färber void cpu_breakpoint_remove_all(CPUArchState *env, int mask)
1637a1d1bb31Saliguori {
1638a1d1bb31Saliguori #if defined(TARGET_HAS_ICE)
1639c0ce998eSaliguori     CPUBreakpoint *bp, *next;
1640a1d1bb31Saliguori 
164172cf2d4fSBlue Swirl     QTAILQ_FOREACH_SAFE(bp, &env->breakpoints, entry, next) {
1642a1d1bb31Saliguori         if (bp->flags & mask)
1643a1d1bb31Saliguori             cpu_breakpoint_remove_by_ref(env, bp);
1644c0ce998eSaliguori     }
16454c3a88a2Sbellard #endif
16464c3a88a2Sbellard }
16474c3a88a2Sbellard 
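/* A hypothetical caller, compiled out, showing the intended pairing of the
   breakpoint API above: insert returns a reference through its out
   parameter, which is later the cheapest way to remove the breakpoint. */
#if 0
static void example_breakpoint_usage(CPUArchState *env, target_ulong pc)
{
    CPUBreakpoint *bp;

    if (cpu_breakpoint_insert(env, pc, BP_GDB, &bp) == 0) {
        /* ... resume the guest, handle EXCP_DEBUG ... */
        cpu_breakpoint_remove_by_ref(env, bp);
    }
}
#endif
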
1648c33a346eSbellard /* enable or disable single step mode. EXCP_DEBUG is returned by the
1649c33a346eSbellard    CPU loop after each instruction */
16509349b4f9SAndreas Färber void cpu_single_step(CPUArchState *env, int enabled)
1651c33a346eSbellard {
16521fddef4bSbellard #if defined(TARGET_HAS_ICE)
1653c33a346eSbellard     if (env->singlestep_enabled != enabled) {
1654c33a346eSbellard         env->singlestep_enabled = enabled;
1655e22a25c9Saliguori         if (kvm_enabled())
1656e22a25c9Saliguori             kvm_update_guest_debug(env, 0);
1657e22a25c9Saliguori         else {
1658ccbb4d44SStuart Brady             /* must flush all the translated code to avoid inconsistencies */
16599fa3e853Sbellard             /* XXX: only flush what is necessary */
16600124311eSbellard             tb_flush(env);
1661c33a346eSbellard         }
1662e22a25c9Saliguori     }
1663c33a346eSbellard #endif
1664c33a346eSbellard }
1665c33a346eSbellard 
16669349b4f9SAndreas Färber static void cpu_unlink_tb(CPUArchState *env)
1667ea041c0eSbellard {
1668d5975363Spbrook     /* FIXME: TB unchaining isn't SMP safe.  For now just ignore the
1669d5975363Spbrook        problem and hope the cpu will stop of its own accord.  For userspace
1670d5975363Spbrook        emulation this often isn't actually as bad as it sounds.  Often
1671d5975363Spbrook        signals are used primarily to interrupt blocking syscalls.  */
16723098dba0Saurel32     TranslationBlock *tb;
1673c227f099SAnthony Liguori     static spinlock_t interrupt_lock = SPIN_LOCK_UNLOCKED;
16743098dba0Saurel32 
1675cab1b4bdSRiku Voipio     spin_lock(&interrupt_lock);
16763098dba0Saurel32     tb = env->current_tb;
16773098dba0Saurel32     /* if the cpu is currently executing code, we must unlink it and
16783098dba0Saurel32        all the potentially executing TBs */
1679f76cfe56SRiku Voipio     if (tb) {
16803098dba0Saurel32         env->current_tb = NULL;
16813098dba0Saurel32         tb_reset_jump_recursive(tb);
16823098dba0Saurel32     }
1683cab1b4bdSRiku Voipio     spin_unlock(&interrupt_lock);
16843098dba0Saurel32 }
16853098dba0Saurel32 
168697ffbd8dSJan Kiszka #ifndef CONFIG_USER_ONLY
16873098dba0Saurel32 /* mask must never be zero, except for A20 change call */
16889349b4f9SAndreas Färber static void tcg_handle_interrupt(CPUArchState *env, int mask)
16893098dba0Saurel32 {
16903098dba0Saurel32     int old_mask;
16913098dba0Saurel32 
16923098dba0Saurel32     old_mask = env->interrupt_request;
16933098dba0Saurel32     env->interrupt_request |= mask;
16943098dba0Saurel32 
16958edac960Saliguori     /*
16968edac960Saliguori      * If called from iothread context, wake the target cpu in
16978edac960Saliguori      * case it's halted.
16988edac960Saliguori      */
1699b7680cb6SJan Kiszka     if (!qemu_cpu_is_self(env)) {
17008edac960Saliguori         qemu_cpu_kick(env);
17018edac960Saliguori         return;
17028edac960Saliguori     }
17038edac960Saliguori 
17042e70f6efSpbrook     if (use_icount) {
1705266910c4Spbrook         env->icount_decr.u16.high = 0xffff;
17062e70f6efSpbrook         if (!can_do_io(env)
1707be214e6cSaurel32             && (mask & ~old_mask) != 0) {
17082e70f6efSpbrook             cpu_abort(env, "Raised interrupt while not in I/O function");
17092e70f6efSpbrook         }
17102e70f6efSpbrook     } else {
17113098dba0Saurel32         cpu_unlink_tb(env);
1712ea041c0eSbellard     }
17132e70f6efSpbrook }
1714ea041c0eSbellard 
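/* A minimal sketch, compiled out: in icount mode the store above forces
   the current TB to exit.  icount_decr is a union whose .u16.high names
   the numerically-high half of the 32-bit counter on either host
   endianness, so writing 0xffff drives the whole counter negative and
   the next icount check in generated code takes the exit path. */
#if 0
static void example_force_tb_exit(CPUArchState *env)
{
    env->icount_decr.u16.high = 0xffff;   /* same store as above */
}
#endif
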
1715ec6959d0SJan Kiszka CPUInterruptHandler cpu_interrupt_handler = tcg_handle_interrupt;
1716ec6959d0SJan Kiszka 
171797ffbd8dSJan Kiszka #else /* CONFIG_USER_ONLY */
171897ffbd8dSJan Kiszka 
17199349b4f9SAndreas Färber void cpu_interrupt(CPUArchState *env, int mask)
172097ffbd8dSJan Kiszka {
172197ffbd8dSJan Kiszka     env->interrupt_request |= mask;
172297ffbd8dSJan Kiszka     cpu_unlink_tb(env);
172397ffbd8dSJan Kiszka }
172497ffbd8dSJan Kiszka #endif /* CONFIG_USER_ONLY */
172597ffbd8dSJan Kiszka 
17269349b4f9SAndreas Färber void cpu_reset_interrupt(CPUArchState *env, int mask)
1727b54ad049Sbellard {
1728b54ad049Sbellard     env->interrupt_request &= ~mask;
1729b54ad049Sbellard }
1730b54ad049Sbellard 
17319349b4f9SAndreas Färber void cpu_exit(CPUArchState *env)
17323098dba0Saurel32 {
17333098dba0Saurel32     env->exit_request = 1;
17343098dba0Saurel32     cpu_unlink_tb(env);
17353098dba0Saurel32 }
17363098dba0Saurel32 
17379349b4f9SAndreas Färber void cpu_abort(CPUArchState *env, const char *fmt, ...)
17387501267eSbellard {
17397501267eSbellard     va_list ap;
1740493ae1f0Spbrook     va_list ap2;
17417501267eSbellard 
17427501267eSbellard     va_start(ap, fmt);
1743493ae1f0Spbrook     va_copy(ap2, ap);
17447501267eSbellard     fprintf(stderr, "qemu: fatal: ");
17457501267eSbellard     vfprintf(stderr, fmt, ap);
17467501267eSbellard     fprintf(stderr, "\n");
17477501267eSbellard #ifdef TARGET_I386
17487fe48483Sbellard     cpu_dump_state(env, stderr, fprintf, X86_DUMP_FPU | X86_DUMP_CCOP);
17497fe48483Sbellard #else
17507fe48483Sbellard     cpu_dump_state(env, stderr, fprintf, 0);
17517501267eSbellard #endif
175293fcfe39Saliguori     if (qemu_log_enabled()) {
175393fcfe39Saliguori         qemu_log("qemu: fatal: ");
175493fcfe39Saliguori         qemu_log_vprintf(fmt, ap2);
175593fcfe39Saliguori         qemu_log("\n");
1756f9373291Sj_mayer #ifdef TARGET_I386
175793fcfe39Saliguori         log_cpu_state(env, X86_DUMP_FPU | X86_DUMP_CCOP);
1758f9373291Sj_mayer #else
175993fcfe39Saliguori         log_cpu_state(env, 0);
1760f9373291Sj_mayer #endif
176131b1a7b4Saliguori         qemu_log_flush();
176293fcfe39Saliguori         qemu_log_close();
1763924edcaeSbalrog     }
1764493ae1f0Spbrook     va_end(ap2);
1765f9373291Sj_mayer     va_end(ap);
1766fd052bf6SRiku Voipio #if defined(CONFIG_USER_ONLY)
1767fd052bf6SRiku Voipio     {
1768fd052bf6SRiku Voipio         struct sigaction act;
1769fd052bf6SRiku Voipio         sigfillset(&act.sa_mask);
1770fd052bf6SRiku Voipio         act.sa_handler = SIG_DFL;
1771fd052bf6SRiku Voipio         sigaction(SIGABRT, &act, NULL);
1772fd052bf6SRiku Voipio     }
1773fd052bf6SRiku Voipio #endif
17747501267eSbellard     abort();
17757501267eSbellard }
17767501267eSbellard 
17779349b4f9SAndreas Färber CPUArchState *cpu_copy(CPUArchState *env)
1778c5be9f08Sths {
17799349b4f9SAndreas Färber     CPUArchState *new_env = cpu_init(env->cpu_model_str);
17809349b4f9SAndreas Färber     CPUArchState *next_cpu = new_env->next_cpu;
1781c5be9f08Sths     int cpu_index = new_env->cpu_index;
17825a38f081Saliguori #if defined(TARGET_HAS_ICE)
17835a38f081Saliguori     CPUBreakpoint *bp;
17845a38f081Saliguori     CPUWatchpoint *wp;
17855a38f081Saliguori #endif
17865a38f081Saliguori 
17879349b4f9SAndreas Färber     memcpy(new_env, env, sizeof(CPUArchState));
17885a38f081Saliguori 
17895a38f081Saliguori     /* Preserve chaining and index. */
1790c5be9f08Sths     new_env->next_cpu = next_cpu;
1791c5be9f08Sths     new_env->cpu_index = cpu_index;
17925a38f081Saliguori 
17935a38f081Saliguori     /* Clone all break/watchpoints.
17945a38f081Saliguori        Note: Once we support ptrace with hw-debug register access, make sure
17955a38f081Saliguori        BP_CPU break/watchpoints are handled correctly on clone. */
179672cf2d4fSBlue Swirl     QTAILQ_INIT(&env->breakpoints);
179772cf2d4fSBlue Swirl     QTAILQ_INIT(&env->watchpoints);
17985a38f081Saliguori #if defined(TARGET_HAS_ICE)
179972cf2d4fSBlue Swirl     QTAILQ_FOREACH(bp, &env->breakpoints, entry) {
18005a38f081Saliguori         cpu_breakpoint_insert(new_env, bp->pc, bp->flags, NULL);
18015a38f081Saliguori     }
180272cf2d4fSBlue Swirl     QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
18035a38f081Saliguori         cpu_watchpoint_insert(new_env, wp->vaddr, (~wp->len_mask) + 1,
18045a38f081Saliguori                               wp->flags, NULL);
18055a38f081Saliguori     }
18065a38f081Saliguori #endif
18075a38f081Saliguori 
1808c5be9f08Sths     return new_env;
1809c5be9f08Sths }
1810c5be9f08Sths 
18110124311eSbellard #if !defined(CONFIG_USER_ONLY)
18120cac1b66SBlue Swirl void tb_flush_jmp_cache(CPUArchState *env, target_ulong addr)
18135c751e99Sedgar_igl {
18145c751e99Sedgar_igl     unsigned int i;
18155c751e99Sedgar_igl 
18165c751e99Sedgar_igl     /* Discard jump cache entries for any tb which might potentially
18175c751e99Sedgar_igl        overlap the flushed page.  */
18185c751e99Sedgar_igl     i = tb_jmp_cache_hash_page(addr - TARGET_PAGE_SIZE);
18195c751e99Sedgar_igl     memset (&env->tb_jmp_cache[i], 0,
18205c751e99Sedgar_igl             TB_JMP_PAGE_SIZE * sizeof(TranslationBlock *));
18215c751e99Sedgar_igl 
18225c751e99Sedgar_igl     i = tb_jmp_cache_hash_page(addr);
18235c751e99Sedgar_igl     memset (&env->tb_jmp_cache[i], 0,
18245c751e99Sedgar_igl             TB_JMP_PAGE_SIZE * sizeof(TranslationBlock *));
18255c751e99Sedgar_igl }
18265c751e99Sedgar_igl 
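/* An illustrative note, compiled out: two memsets are needed above because
   a TB is hashed under the page of its *starting* pc, yet may extend into
   the next page.  Hypothetical addresses, assuming 4 KiB pages: */
#if 0
/* flushing vaddr page 0x2000 also clears the jump-cache rows for page
   0x1000, since a TB starting at 0x1ff8 can spill into 0x2000 and would
   otherwise survive in the cache. */
#endif
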
1827d24981d3SJuan Quintela static void tlb_reset_dirty_range_all(ram_addr_t start, ram_addr_t end,
1828d24981d3SJuan Quintela                                       uintptr_t length)
18291ccde1cbSbellard {
1830d24981d3SJuan Quintela     uintptr_t start1;
1831f23db169Sbellard 
18321ccde1cbSbellard     /* we modify the TLB cache so that the dirty bit will be set again
18331ccde1cbSbellard        when accessing the range */
18348efe0ca8SStefan Weil     start1 = (uintptr_t)qemu_safe_ram_ptr(start);
1835a57d23e4SStefan Weil     /* Check that we don't span multiple blocks - this breaks the
18365579c7f3Spbrook        address comparisons below.  */
18378efe0ca8SStefan Weil     if ((uintptr_t)qemu_safe_ram_ptr(end - 1) - start1
18385579c7f3Spbrook             != (end - 1) - start) {
18395579c7f3Spbrook         abort();
18405579c7f3Spbrook     }
1841e5548617SBlue Swirl     cpu_tlb_reset_dirty_all(start1, length);
1842d24981d3SJuan Quintela 
1843d24981d3SJuan Quintela }
1844d24981d3SJuan Quintela 
1845d24981d3SJuan Quintela /* Note: start and end must be within the same ram block.  */
1846d24981d3SJuan Quintela void cpu_physical_memory_reset_dirty(ram_addr_t start, ram_addr_t end,
1847d24981d3SJuan Quintela                                      int dirty_flags)
1848d24981d3SJuan Quintela {
1849d24981d3SJuan Quintela     uintptr_t length;
1850d24981d3SJuan Quintela 
1851d24981d3SJuan Quintela     start &= TARGET_PAGE_MASK;
1852d24981d3SJuan Quintela     end = TARGET_PAGE_ALIGN(end);
1853d24981d3SJuan Quintela 
1854d24981d3SJuan Quintela     length = end - start;
1855d24981d3SJuan Quintela     if (length == 0)
1856d24981d3SJuan Quintela         return;
1857d24981d3SJuan Quintela     cpu_physical_memory_mask_dirty_range(start, length, dirty_flags);
1858d24981d3SJuan Quintela 
1859d24981d3SJuan Quintela     if (tcg_enabled()) {
1860d24981d3SJuan Quintela         tlb_reset_dirty_range_all(start, end, length);
1861d24981d3SJuan Quintela     }
18621ccde1cbSbellard }
18631ccde1cbSbellard 
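/* A hypothetical caller, compiled out, mirroring how display emulation
   uses this API: once a screen region has been redrawn, its VGA dirty
   bit is cleared so that the next guest write marks the range dirty
   again.  VGA_DIRTY_FLAG is assumed to come from the dirty-tracking
   headers this file already includes. */
#if 0
static void example_vga_redraw_done(ram_addr_t start, ram_addr_t size)
{
    cpu_physical_memory_reset_dirty(start, start + size, VGA_DIRTY_FLAG);
}
#endif
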
186474576198Saliguori int cpu_physical_memory_set_dirty_tracking(int enable)
186574576198Saliguori {
186774576198Saliguori     in_migration = enable;
1868f6f3fbcaSMichael S. Tsirkin     return 0;
186974576198Saliguori }
187074576198Saliguori 
1871e5548617SBlue Swirl target_phys_addr_t memory_region_section_get_iotlb(CPUArchState *env,
1872e5548617SBlue Swirl                                                    MemoryRegionSection *section,
1873e5548617SBlue Swirl                                                    target_ulong vaddr,
1874e5548617SBlue Swirl                                                    target_phys_addr_t paddr,
1875e5548617SBlue Swirl                                                    int prot,
1876e5548617SBlue Swirl                                                    target_ulong *address)
1877e5548617SBlue Swirl {
1878e5548617SBlue Swirl     target_phys_addr_t iotlb;
1879e5548617SBlue Swirl     CPUWatchpoint *wp;
1880e5548617SBlue Swirl 
1881cc5bea60SBlue Swirl     if (memory_region_is_ram(section->mr)) {
1882e5548617SBlue Swirl         /* Normal RAM.  */
1883e5548617SBlue Swirl         iotlb = (memory_region_get_ram_addr(section->mr) & TARGET_PAGE_MASK)
1884cc5bea60SBlue Swirl             + memory_region_section_addr(section, paddr);
1885e5548617SBlue Swirl         if (!section->readonly) {
1886e5548617SBlue Swirl             iotlb |= phys_section_notdirty;
1887e5548617SBlue Swirl         } else {
1888e5548617SBlue Swirl             iotlb |= phys_section_rom;
1889e5548617SBlue Swirl         }
1890e5548617SBlue Swirl     } else {
1891e5548617SBlue Swirl         /* IO handlers are currently passed a physical address.
1892e5548617SBlue Swirl            It would be nice to pass an offset from the base address
1893e5548617SBlue Swirl            of that region.  This would avoid having to special case RAM,
1894e5548617SBlue Swirl            and avoid full address decoding in every device.
1895e5548617SBlue Swirl            We can't use the high bits of pd for this because
1896e5548617SBlue Swirl            IO_MEM_ROMD uses these as a ram address.  */
1897e5548617SBlue Swirl         iotlb = section - phys_sections;
1898cc5bea60SBlue Swirl         iotlb += memory_region_section_addr(section, paddr);
1899e5548617SBlue Swirl     }
1900e5548617SBlue Swirl 
1901e5548617SBlue Swirl     /* Make accesses to pages with watchpoints go via the
1902e5548617SBlue Swirl        watchpoint trap routines.  */
1903e5548617SBlue Swirl     QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
1904e5548617SBlue Swirl         if (vaddr == (wp->vaddr & TARGET_PAGE_MASK)) {
1905e5548617SBlue Swirl             /* Avoid trapping reads of pages with a write breakpoint. */
1906e5548617SBlue Swirl             if ((prot & PAGE_WRITE) || (wp->flags & BP_MEM_READ)) {
1907e5548617SBlue Swirl                 iotlb = phys_section_watch + paddr;
1908e5548617SBlue Swirl                 *address |= TLB_MMIO;
1909e5548617SBlue Swirl                 break;
1910e5548617SBlue Swirl             }
1911e5548617SBlue Swirl         }
1912e5548617SBlue Swirl     }
1913e5548617SBlue Swirl 
1914e5548617SBlue Swirl     return iotlb;
1915e5548617SBlue Swirl }
1916e5548617SBlue Swirl 
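/* An illustrative summary, compiled out, of the iotlb encodings chosen
   above (section indices are small, so for RAM they fit in the
   page-offset bits of the page-aligned ram address):
     RAM, writable : page-aligned ram_addr | phys_section_notdirty
     RAM, read-only: page-aligned ram_addr | phys_section_rom
     MMIO          : section index + offset within the section
     watched page  : phys_section_watch + paddr, with TLB_MMIO set so
                     every access takes the slow path */
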
19170124311eSbellard #else
1918edf8e2afSMika Westerberg /*
1919edf8e2afSMika Westerberg  * Walks guest process memory "regions" one by one
1920edf8e2afSMika Westerberg  * and calls callback function 'fn' for each region.
1921edf8e2afSMika Westerberg  */
19225cd2c5b6SRichard Henderson 
19235cd2c5b6SRichard Henderson struct walk_memory_regions_data
192433417e70Sbellard {
19255cd2c5b6SRichard Henderson     walk_memory_regions_fn fn;
19265cd2c5b6SRichard Henderson     void *priv;
19278efe0ca8SStefan Weil     uintptr_t start;
19285cd2c5b6SRichard Henderson     int prot;
19295cd2c5b6SRichard Henderson };
19309fa3e853Sbellard 
19315cd2c5b6SRichard Henderson static int walk_memory_regions_end(struct walk_memory_regions_data *data,
1932b480d9b7SPaul Brook                                    abi_ulong end, int new_prot)
19335cd2c5b6SRichard Henderson {
19345cd2c5b6SRichard Henderson     if (data->start != -1ul) {
19355cd2c5b6SRichard Henderson         int rc = data->fn(data->priv, data->start, end, data->prot);
19365cd2c5b6SRichard Henderson         if (rc != 0) {
19375cd2c5b6SRichard Henderson             return rc;
19385cd2c5b6SRichard Henderson         }
19395cd2c5b6SRichard Henderson     }
1940edf8e2afSMika Westerberg 
19415cd2c5b6SRichard Henderson     data->start = (new_prot ? end : -1ul);
19425cd2c5b6SRichard Henderson     data->prot = new_prot;
19435cd2c5b6SRichard Henderson 
19445cd2c5b6SRichard Henderson     return 0;
194533417e70Sbellard }
19465cd2c5b6SRichard Henderson 
19475cd2c5b6SRichard Henderson static int walk_memory_regions_1(struct walk_memory_regions_data *data,
1948b480d9b7SPaul Brook                                  abi_ulong base, int level, void **lp)
19495cd2c5b6SRichard Henderson {
1950b480d9b7SPaul Brook     abi_ulong pa;
19515cd2c5b6SRichard Henderson     int i, rc;
19525cd2c5b6SRichard Henderson 
19535cd2c5b6SRichard Henderson     if (*lp == NULL) {
19545cd2c5b6SRichard Henderson         return walk_memory_regions_end(data, base, 0);
19559fa3e853Sbellard     }
19565cd2c5b6SRichard Henderson 
19575cd2c5b6SRichard Henderson     if (level == 0) {
19585cd2c5b6SRichard Henderson         PageDesc *pd = *lp;
19597296abacSPaul Brook         for (i = 0; i < L2_SIZE; ++i) {
19605cd2c5b6SRichard Henderson             int prot = pd[i].flags;
19615cd2c5b6SRichard Henderson 
19625cd2c5b6SRichard Henderson             pa = base | (i << TARGET_PAGE_BITS);
19635cd2c5b6SRichard Henderson             if (prot != data->prot) {
19645cd2c5b6SRichard Henderson                 rc = walk_memory_regions_end(data, pa, prot);
19655cd2c5b6SRichard Henderson                 if (rc != 0) {
19665cd2c5b6SRichard Henderson                     return rc;
19679fa3e853Sbellard                 }
19689fa3e853Sbellard             }
19695cd2c5b6SRichard Henderson         }
19705cd2c5b6SRichard Henderson     } else {
19715cd2c5b6SRichard Henderson         void **pp = *lp;
19727296abacSPaul Brook         for (i = 0; i < L2_SIZE; ++i) {
1973b480d9b7SPaul Brook             pa = base | ((abi_ulong)i <<
1974b480d9b7SPaul Brook                 (TARGET_PAGE_BITS + L2_BITS * level));
19755cd2c5b6SRichard Henderson             rc = walk_memory_regions_1(data, pa, level - 1, pp + i);
19765cd2c5b6SRichard Henderson             if (rc != 0) {
19775cd2c5b6SRichard Henderson                 return rc;
19785cd2c5b6SRichard Henderson             }
19795cd2c5b6SRichard Henderson         }
19805cd2c5b6SRichard Henderson     }
19815cd2c5b6SRichard Henderson 
19825cd2c5b6SRichard Henderson     return 0;
19835cd2c5b6SRichard Henderson }
19845cd2c5b6SRichard Henderson 
19855cd2c5b6SRichard Henderson int walk_memory_regions(void *priv, walk_memory_regions_fn fn)
19865cd2c5b6SRichard Henderson {
19875cd2c5b6SRichard Henderson     struct walk_memory_regions_data data;
19888efe0ca8SStefan Weil     uintptr_t i;
19895cd2c5b6SRichard Henderson 
19905cd2c5b6SRichard Henderson     data.fn = fn;
19915cd2c5b6SRichard Henderson     data.priv = priv;
19925cd2c5b6SRichard Henderson     data.start = -1ul;
19935cd2c5b6SRichard Henderson     data.prot = 0;
19945cd2c5b6SRichard Henderson 
19955cd2c5b6SRichard Henderson     for (i = 0; i < V_L1_SIZE; i++) {
1996b480d9b7SPaul Brook         int rc = walk_memory_regions_1(&data, (abi_ulong)i << V_L1_SHIFT,
19975cd2c5b6SRichard Henderson                                        V_L1_SHIFT / L2_BITS - 1, l1_map + i);
19985cd2c5b6SRichard Henderson         if (rc != 0) {
19995cd2c5b6SRichard Henderson             return rc;
20005cd2c5b6SRichard Henderson         }
20015cd2c5b6SRichard Henderson     }
20025cd2c5b6SRichard Henderson 
20035cd2c5b6SRichard Henderson     return walk_memory_regions_end(&data, 0, 0);
2004edf8e2afSMika Westerberg }
2005edf8e2afSMika Westerberg 
2006b480d9b7SPaul Brook static int dump_region(void *priv, abi_ulong start,
2007b480d9b7SPaul Brook     abi_ulong end, unsigned long prot)
2008edf8e2afSMika Westerberg {
2009edf8e2afSMika Westerberg     FILE *f = (FILE *)priv;
2010edf8e2afSMika Westerberg 
2011b480d9b7SPaul Brook     (void) fprintf(f, TARGET_ABI_FMT_lx"-"TARGET_ABI_FMT_lx
2012b480d9b7SPaul Brook         " "TARGET_ABI_FMT_lx" %c%c%c\n",
2013edf8e2afSMika Westerberg         start, end, end - start,
2014edf8e2afSMika Westerberg         ((prot & PAGE_READ) ? 'r' : '-'),
2015edf8e2afSMika Westerberg         ((prot & PAGE_WRITE) ? 'w' : '-'),
2016edf8e2afSMika Westerberg         ((prot & PAGE_EXEC) ? 'x' : '-'));
2017edf8e2afSMika Westerberg 
2018edf8e2afSMika Westerberg     return (0);
2019edf8e2afSMika Westerberg }
2020edf8e2afSMika Westerberg 
2021edf8e2afSMika Westerberg /* dump memory mappings */
2022edf8e2afSMika Westerberg void page_dump(FILE *f)
2023edf8e2afSMika Westerberg {
2024edf8e2afSMika Westerberg     (void) fprintf(f, "%-8s %-8s %-8s %s\n",
2025edf8e2afSMika Westerberg             "start", "end", "size", "prot");
2026edf8e2afSMika Westerberg     walk_memory_regions(f, dump_region);
20279fa3e853Sbellard }
20289fa3e853Sbellard 
202953a5960aSpbrook int page_get_flags(target_ulong address)
20309fa3e853Sbellard {
20319fa3e853Sbellard     PageDesc *p;
20329fa3e853Sbellard 
20339fa3e853Sbellard     p = page_find(address >> TARGET_PAGE_BITS);
20349fa3e853Sbellard     if (!p)
20359fa3e853Sbellard         return 0;
20369fa3e853Sbellard     return p->flags;
20379fa3e853Sbellard }
20389fa3e853Sbellard 
2039376a7909SRichard Henderson /* Modify the flags of a page and invalidate the code if necessary.
2040376a7909SRichard Henderson    The flag PAGE_WRITE_ORG is set automatically depending
2041376a7909SRichard Henderson    on PAGE_WRITE.  The mmap_lock should already be held.  */
204253a5960aSpbrook void page_set_flags(target_ulong start, target_ulong end, int flags)
20439fa3e853Sbellard {
2044376a7909SRichard Henderson     target_ulong addr, len;
20459fa3e853Sbellard 
2046376a7909SRichard Henderson     /* This function should never be called with addresses outside the
2047376a7909SRichard Henderson        guest address space.  If this assert fires, it probably indicates
2048376a7909SRichard Henderson        a missing call to h2g_valid.  */
2049b480d9b7SPaul Brook #if TARGET_ABI_BITS > L1_MAP_ADDR_SPACE_BITS
2050b480d9b7SPaul Brook     assert(end < ((abi_ulong)1 << L1_MAP_ADDR_SPACE_BITS));
2051376a7909SRichard Henderson #endif
2052376a7909SRichard Henderson     assert(start < end);
2053376a7909SRichard Henderson 
20549fa3e853Sbellard     start = start & TARGET_PAGE_MASK;
20559fa3e853Sbellard     end = TARGET_PAGE_ALIGN(end);
2056376a7909SRichard Henderson 
2057376a7909SRichard Henderson     if (flags & PAGE_WRITE) {
20589fa3e853Sbellard         flags |= PAGE_WRITE_ORG;
2059376a7909SRichard Henderson     }
2060376a7909SRichard Henderson 
2061376a7909SRichard Henderson     for (addr = start, len = end - start;
2062376a7909SRichard Henderson          len != 0;
2063376a7909SRichard Henderson          len -= TARGET_PAGE_SIZE, addr += TARGET_PAGE_SIZE) {
2064376a7909SRichard Henderson         PageDesc *p = page_find_alloc(addr >> TARGET_PAGE_BITS, 1);
2065376a7909SRichard Henderson 
2066376a7909SRichard Henderson         /* If the write protection bit is set, then we invalidate
2067376a7909SRichard Henderson            the code inside.  */
20689fa3e853Sbellard         if (!(p->flags & PAGE_WRITE) &&
20699fa3e853Sbellard             (flags & PAGE_WRITE) &&
20709fa3e853Sbellard             p->first_tb) {
2071d720b93dSbellard             tb_invalidate_phys_page(addr, 0, NULL);
20729fa3e853Sbellard         }
20739fa3e853Sbellard         p->flags = flags;
20749fa3e853Sbellard     }
20759fa3e853Sbellard }
20769fa3e853Sbellard 
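/* A hypothetical caller, compiled out: this is the shape of the call the
   target mmap() emulation makes for a fresh anonymous mapping; because
   PAGE_WRITE is requested, PAGE_WRITE_ORG is recorded automatically. */
#if 0
static void example_map_anon_rw(target_ulong start, target_ulong len)
{
    page_set_flags(start, start + len,
                   PAGE_VALID | PAGE_READ | PAGE_WRITE);
}
#endif
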
20773d97b40bSths int page_check_range(target_ulong start, target_ulong len, int flags)
20783d97b40bSths {
20793d97b40bSths     PageDesc *p;
20803d97b40bSths     target_ulong end;
20813d97b40bSths     target_ulong addr;
20823d97b40bSths 
2083376a7909SRichard Henderson     /* This function should never be called with addresses outside the
2084376a7909SRichard Henderson        guest address space.  If this assert fires, it probably indicates
2085376a7909SRichard Henderson        a missing call to h2g_valid.  */
2086338e9e6cSBlue Swirl #if TARGET_ABI_BITS > L1_MAP_ADDR_SPACE_BITS
2087338e9e6cSBlue Swirl     assert(start < ((abi_ulong)1 << L1_MAP_ADDR_SPACE_BITS));
2088376a7909SRichard Henderson #endif
2089376a7909SRichard Henderson 
20903e0650a9SRichard Henderson     if (len == 0) {
20913e0650a9SRichard Henderson         return 0;
20923e0650a9SRichard Henderson     }
2093376a7909SRichard Henderson     if (start + len - 1 < start) {
2094376a7909SRichard Henderson         /* We've wrapped around.  */
209555f280c9Sbalrog         return -1;
2096376a7909SRichard Henderson     }
209755f280c9Sbalrog 
20983d97b40bSths     end = TARGET_PAGE_ALIGN(start+len); /* must do before we lose bits in the next step */
20993d97b40bSths     start = start & TARGET_PAGE_MASK;
21003d97b40bSths 
2101376a7909SRichard Henderson     for (addr = start, len = end - start;
2102376a7909SRichard Henderson          len != 0;
2103376a7909SRichard Henderson          len -= TARGET_PAGE_SIZE, addr += TARGET_PAGE_SIZE) {
21043d97b40bSths         p = page_find(addr >> TARGET_PAGE_BITS);
21053d97b40bSths         if (!p)
21063d97b40bSths             return -1;
21073d97b40bSths         if (!(p->flags & PAGE_VALID))
21083d97b40bSths             return -1;
21093d97b40bSths 
2110dae3270cSbellard         if ((flags & PAGE_READ) && !(p->flags & PAGE_READ))
21113d97b40bSths             return -1;
2112dae3270cSbellard         if (flags & PAGE_WRITE) {
2113dae3270cSbellard             if (!(p->flags & PAGE_WRITE_ORG))
21143d97b40bSths                 return -1;
2115dae3270cSbellard             /* unprotect the page if it was made read-only because it
2116dae3270cSbellard                contains translated code */
2117dae3270cSbellard             if (!(p->flags & PAGE_WRITE)) {
2118dae3270cSbellard                 if (!page_unprotect(addr, 0, NULL))
2119dae3270cSbellard                     return -1;
2120dae3270cSbellard             }
2121dae3270cSbellard             return 0;
2122dae3270cSbellard         }
21233d97b40bSths     }
21243d97b40bSths     return 0;
21253d97b40bSths }
21263d97b40bSths 
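/* A hypothetical caller, compiled out: syscall emulation can validate a
   guest buffer before touching it; a return of 0 means every page in the
   range is valid and grants the requested access. */
#if 0
static bool example_guest_buffer_readable(target_ulong addr, target_ulong len)
{
    return page_check_range(addr, len, PAGE_READ) == 0;
}
#endif
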
21279fa3e853Sbellard /* called from signal handler: invalidate the code and unprotect the
2128ccbb4d44SStuart Brady    page. Return TRUE if the fault was successfully handled. */
21296375e09eSStefan Weil int page_unprotect(target_ulong address, uintptr_t pc, void *puc)
21309fa3e853Sbellard {
213145d679d6SAurelien Jarno     unsigned int prot;
213245d679d6SAurelien Jarno     PageDesc *p;
213353a5960aSpbrook     target_ulong host_start, host_end, addr;
21349fa3e853Sbellard 
2135c8a706feSpbrook     /* Technically this isn't safe inside a signal handler.  However we
2136c8a706feSpbrook        know this only ever happens in a synchronous SEGV handler, so in
2137c8a706feSpbrook        practice it seems to be ok.  */
2138c8a706feSpbrook     mmap_lock();
2139c8a706feSpbrook 
214045d679d6SAurelien Jarno     p = page_find(address >> TARGET_PAGE_BITS);
214145d679d6SAurelien Jarno     if (!p) {
2142c8a706feSpbrook         mmap_unlock();
21439fa3e853Sbellard         return 0;
2144c8a706feSpbrook     }
214545d679d6SAurelien Jarno 
21469fa3e853Sbellard     /* if the page was really writable, then we change its
21479fa3e853Sbellard        protection back to writable */
214845d679d6SAurelien Jarno     if ((p->flags & PAGE_WRITE_ORG) && !(p->flags & PAGE_WRITE)) {
214945d679d6SAurelien Jarno         host_start = address & qemu_host_page_mask;
215045d679d6SAurelien Jarno         host_end = host_start + qemu_host_page_size;
215145d679d6SAurelien Jarno 
215245d679d6SAurelien Jarno         prot = 0;
215345d679d6SAurelien Jarno         for (addr = host_start ; addr < host_end ; addr += TARGET_PAGE_SIZE) {
215445d679d6SAurelien Jarno             p = page_find(addr >> TARGET_PAGE_BITS);
215545d679d6SAurelien Jarno             p->flags |= PAGE_WRITE;
215645d679d6SAurelien Jarno             prot |= p->flags;
215745d679d6SAurelien Jarno 
21589fa3e853Sbellard             /* and since the content will be modified, we must invalidate
21599fa3e853Sbellard                the corresponding translated code. */
216045d679d6SAurelien Jarno             tb_invalidate_phys_page(addr, pc, puc);
21619fa3e853Sbellard #ifdef DEBUG_TB_CHECK
216245d679d6SAurelien Jarno             tb_invalidate_check(addr);
21639fa3e853Sbellard #endif
216445d679d6SAurelien Jarno         }
216545d679d6SAurelien Jarno         mprotect((void *)g2h(host_start), qemu_host_page_size,
216645d679d6SAurelien Jarno                  prot & PAGE_BITS);
216745d679d6SAurelien Jarno 
2168c8a706feSpbrook         mmap_unlock();
21699fa3e853Sbellard         return 1;
21709fa3e853Sbellard     }
2171c8a706feSpbrook     mmap_unlock();
21729fa3e853Sbellard     return 0;
21739fa3e853Sbellard }
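
/* A minimal sketch, compiled out, of how the user-mode SEGV handler uses
   page_unprotect() above: a write fault on a page we write-protected for
   translated code is resolved in place, and a non-zero return means the
   faulting write can simply be restarted. */
#if 0
static int example_handle_write_fault(target_ulong guest_addr, uintptr_t pc)
{
    /* pc identifies the faulting translated code, letting self-modifying
       code on the current TB be handled precisely */
    return page_unprotect(guest_addr, pc, NULL);
}
#endif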
21749fa3e853Sbellard #endif /* defined(CONFIG_USER_ONLY) */
217533417e70Sbellard 
2176e2eef170Spbrook #if !defined(CONFIG_USER_ONLY)
21778da3ff18Spbrook 
2178c04b2b78SPaul Brook #define SUBPAGE_IDX(addr) ((addr) & ~TARGET_PAGE_MASK)
2179c04b2b78SPaul Brook typedef struct subpage_t {
218070c68e44SAvi Kivity     MemoryRegion iomem;
2181c04b2b78SPaul Brook     target_phys_addr_t base;
21825312bd8bSAvi Kivity     uint16_t sub_section[TARGET_PAGE_SIZE];
2183c04b2b78SPaul Brook } subpage_t;
2184c04b2b78SPaul Brook 
2185c227f099SAnthony Liguori static int subpage_register (subpage_t *mmio, uint32_t start, uint32_t end,
21865312bd8bSAvi Kivity                              uint16_t section);
21870f0cb164SAvi Kivity static subpage_t *subpage_init(target_phys_addr_t base);
21885312bd8bSAvi Kivity static void destroy_page_desc(uint16_t section_index)
218954688b1eSAvi Kivity {
21905312bd8bSAvi Kivity     MemoryRegionSection *section = &phys_sections[section_index];
21915312bd8bSAvi Kivity     MemoryRegion *mr = section->mr;
219254688b1eSAvi Kivity 
219354688b1eSAvi Kivity     if (mr->subpage) {
219454688b1eSAvi Kivity         subpage_t *subpage = container_of(mr, subpage_t, iomem);
219554688b1eSAvi Kivity         memory_region_destroy(&subpage->iomem);
219654688b1eSAvi Kivity         g_free(subpage);
219754688b1eSAvi Kivity     }
219854688b1eSAvi Kivity }
219954688b1eSAvi Kivity 
22004346ae3eSAvi Kivity static void destroy_l2_mapping(PhysPageEntry *lp, unsigned level)
220154688b1eSAvi Kivity {
220254688b1eSAvi Kivity     unsigned i;
2203d6f2ea22SAvi Kivity     PhysPageEntry *p;
220454688b1eSAvi Kivity 
2205c19e8800SAvi Kivity     if (lp->ptr == PHYS_MAP_NODE_NIL) {
220654688b1eSAvi Kivity         return;
220754688b1eSAvi Kivity     }
220854688b1eSAvi Kivity 
2209c19e8800SAvi Kivity     p = phys_map_nodes[lp->ptr];
221054688b1eSAvi Kivity     for (i = 0; i < L2_SIZE; ++i) {
221107f07b31SAvi Kivity         if (!p[i].is_leaf) {
221254688b1eSAvi Kivity             destroy_l2_mapping(&p[i], level - 1);
22134346ae3eSAvi Kivity         } else {
2214c19e8800SAvi Kivity             destroy_page_desc(p[i].ptr);
22154346ae3eSAvi Kivity         }
221654688b1eSAvi Kivity     }
221707f07b31SAvi Kivity     lp->is_leaf = 0;
2218c19e8800SAvi Kivity     lp->ptr = PHYS_MAP_NODE_NIL;
221954688b1eSAvi Kivity }
222054688b1eSAvi Kivity 
222154688b1eSAvi Kivity static void destroy_all_mappings(void)
222254688b1eSAvi Kivity {
22233eef53dfSAvi Kivity     destroy_l2_mapping(&phys_map, P_L2_LEVELS - 1);
2224d6f2ea22SAvi Kivity     phys_map_nodes_reset();
222554688b1eSAvi Kivity }
222654688b1eSAvi Kivity 
22275312bd8bSAvi Kivity static uint16_t phys_section_add(MemoryRegionSection *section)
22285312bd8bSAvi Kivity {
22295312bd8bSAvi Kivity     if (phys_sections_nb == phys_sections_nb_alloc) {
22305312bd8bSAvi Kivity         phys_sections_nb_alloc = MAX(phys_sections_nb_alloc * 2, 16);
22315312bd8bSAvi Kivity         phys_sections = g_renew(MemoryRegionSection, phys_sections,
22325312bd8bSAvi Kivity                                 phys_sections_nb_alloc);
22335312bd8bSAvi Kivity     }
22345312bd8bSAvi Kivity     phys_sections[phys_sections_nb] = *section;
22355312bd8bSAvi Kivity     return phys_sections_nb++;
22365312bd8bSAvi Kivity }
22375312bd8bSAvi Kivity 
22385312bd8bSAvi Kivity static void phys_sections_clear(void)
22395312bd8bSAvi Kivity {
22405312bd8bSAvi Kivity     phys_sections_nb = 0;
22415312bd8bSAvi Kivity }
22425312bd8bSAvi Kivity 
22438f2498f9SMichael S. Tsirkin /* register a physical memory section.
22448f2498f9SMichael S. Tsirkin    For RAM, section->size must be a multiple of the target page size.
22458f2498f9SMichael S. Tsirkin    Head and tail fragments that are not page aligned (and sections
22468da3ff18Spbrook    smaller than a page) are routed through a subpage container, while
22478da3ff18Spbrook    fully aligned pages go straight into the physical page map.  The
2248ccbb4d44SStuart Brady    address passed to an IO callback is the offset from the start of
22498da3ff18Spbrook    the region: the subpage dispatcher applies offset_within_region
22508da3ff18Spbrook    before forwarding an access.  */
22510f0cb164SAvi Kivity static void register_subpage(MemoryRegionSection *section)
22520f0cb164SAvi Kivity {
22530f0cb164SAvi Kivity     subpage_t *subpage;
22540f0cb164SAvi Kivity     target_phys_addr_t base = section->offset_within_address_space
22550f0cb164SAvi Kivity         & TARGET_PAGE_MASK;
2256f3705d53SAvi Kivity     MemoryRegionSection *existing = phys_page_find(base >> TARGET_PAGE_BITS);
22570f0cb164SAvi Kivity     MemoryRegionSection subsection = {
22580f0cb164SAvi Kivity         .offset_within_address_space = base,
22590f0cb164SAvi Kivity         .size = TARGET_PAGE_SIZE,
22600f0cb164SAvi Kivity     };
22610f0cb164SAvi Kivity     target_phys_addr_t start, end;
22620f0cb164SAvi Kivity 
2263f3705d53SAvi Kivity     assert(existing->mr->subpage || existing->mr == &io_mem_unassigned);
22640f0cb164SAvi Kivity 
2265f3705d53SAvi Kivity     if (!(existing->mr->subpage)) {
22660f0cb164SAvi Kivity         subpage = subpage_init(base);
22670f0cb164SAvi Kivity         subsection.mr = &subpage->iomem;
22682999097bSAvi Kivity         phys_page_set(base >> TARGET_PAGE_BITS, 1,
22692999097bSAvi Kivity                       phys_section_add(&subsection));
22700f0cb164SAvi Kivity     } else {
2271f3705d53SAvi Kivity         subpage = container_of(existing->mr, subpage_t, iomem);
22720f0cb164SAvi Kivity     }
22730f0cb164SAvi Kivity     start = section->offset_within_address_space & ~TARGET_PAGE_MASK;
22740f0cb164SAvi Kivity     end = start + section->size;
22750f0cb164SAvi Kivity     subpage_register(subpage, start, end, phys_section_add(section));
22760f0cb164SAvi Kivity }
22770f0cb164SAvi Kivity 
22780f0cb164SAvi Kivity 
22790f0cb164SAvi Kivity static void register_multipage(MemoryRegionSection *section)
228033417e70Sbellard {
2281dd81124bSAvi Kivity     target_phys_addr_t start_addr = section->offset_within_address_space;
2282dd81124bSAvi Kivity     ram_addr_t size = section->size;
22832999097bSAvi Kivity     target_phys_addr_t addr;
22845312bd8bSAvi Kivity     uint16_t section_index = phys_section_add(section);
2285dd81124bSAvi Kivity 
22863b8e6a2dSEdgar E. Iglesias     assert(size);
2287f6f3fbcaSMichael S. Tsirkin 
22883b8e6a2dSEdgar E. Iglesias     addr = start_addr;
22892999097bSAvi Kivity     phys_page_set(addr >> TARGET_PAGE_BITS, size >> TARGET_PAGE_BITS,
22902999097bSAvi Kivity                   section_index);
229133417e70Sbellard }
229233417e70Sbellard 
22930f0cb164SAvi Kivity void cpu_register_physical_memory_log(MemoryRegionSection *section,
22940f0cb164SAvi Kivity                                       bool readonly)
22950f0cb164SAvi Kivity {
22960f0cb164SAvi Kivity     MemoryRegionSection now = *section, remain = *section;
22970f0cb164SAvi Kivity 
22980f0cb164SAvi Kivity     if ((now.offset_within_address_space & ~TARGET_PAGE_MASK)
22990f0cb164SAvi Kivity         || (now.size < TARGET_PAGE_SIZE)) {
23000f0cb164SAvi Kivity         now.size = MIN(TARGET_PAGE_ALIGN(now.offset_within_address_space)
23010f0cb164SAvi Kivity                        - now.offset_within_address_space,
23020f0cb164SAvi Kivity                        now.size);
23030f0cb164SAvi Kivity         register_subpage(&now);
23040f0cb164SAvi Kivity         remain.size -= now.size;
23050f0cb164SAvi Kivity         remain.offset_within_address_space += now.size;
23060f0cb164SAvi Kivity         remain.offset_within_region += now.size;
23070f0cb164SAvi Kivity     }
23080f0cb164SAvi Kivity     now = remain;
23090f0cb164SAvi Kivity     now.size &= TARGET_PAGE_MASK;
23100f0cb164SAvi Kivity     if (now.size) {
23110f0cb164SAvi Kivity         register_multipage(&now);
23120f0cb164SAvi Kivity         remain.size -= now.size;
23130f0cb164SAvi Kivity         remain.offset_within_address_space += now.size;
23140f0cb164SAvi Kivity         remain.offset_within_region += now.size;
23150f0cb164SAvi Kivity     }
23160f0cb164SAvi Kivity     now = remain;
23170f0cb164SAvi Kivity     if (now.size) {
23180f0cb164SAvi Kivity         register_subpage(&now);
23190f0cb164SAvi Kivity     }
23200f0cb164SAvi Kivity }
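/* Editor's illustration of the split above: with 4 KiB target pages, a
 * section at address 0x1200 of size 0x2000 is registered three ways:
 *   head   [0x1200, 0x2000) -> register_subpage()
 *   middle [0x2000, 0x3000) -> register_multipage()
 *   tail   [0x3000, 0x3200) -> register_subpage()
 * Only the page-aligned middle can occupy whole phys-map leaves. */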
23210f0cb164SAvi Kivity 
23220f0cb164SAvi Kivity 
2323c227f099SAnthony Liguori void qemu_register_coalesced_mmio(target_phys_addr_t addr, ram_addr_t size)
2324f65ed4c1Saliguori {
2325f65ed4c1Saliguori     if (kvm_enabled())
2326f65ed4c1Saliguori         kvm_coalesce_mmio_region(addr, size);
2327f65ed4c1Saliguori }
2328f65ed4c1Saliguori 
2329c227f099SAnthony Liguori void qemu_unregister_coalesced_mmio(target_phys_addr_t addr, ram_addr_t size)
2330f65ed4c1Saliguori {
2331f65ed4c1Saliguori     if (kvm_enabled())
2332f65ed4c1Saliguori         kvm_uncoalesce_mmio_region(addr, size);
2333f65ed4c1Saliguori }
2334f65ed4c1Saliguori 
233562a2744cSSheng Yang void qemu_flush_coalesced_mmio_buffer(void)
233662a2744cSSheng Yang {
233762a2744cSSheng Yang     if (kvm_enabled())
233862a2744cSSheng Yang         kvm_flush_coalesced_mmio_buffer();
233962a2744cSSheng Yang }
234062a2744cSSheng Yang 
2341c902760fSMarcelo Tosatti #if defined(__linux__) && !defined(TARGET_S390X)
2342c902760fSMarcelo Tosatti 
2343c902760fSMarcelo Tosatti #include <sys/vfs.h>
2344c902760fSMarcelo Tosatti 
2345c902760fSMarcelo Tosatti #define HUGETLBFS_MAGIC       0x958458f6
2346c902760fSMarcelo Tosatti 
2347c902760fSMarcelo Tosatti static long gethugepagesize(const char *path)
2348c902760fSMarcelo Tosatti {
2349c902760fSMarcelo Tosatti     struct statfs fs;
2350c902760fSMarcelo Tosatti     int ret;
2351c902760fSMarcelo Tosatti 
2352c902760fSMarcelo Tosatti     do {
2353c902760fSMarcelo Tosatti         ret = statfs(path, &fs);
2354c902760fSMarcelo Tosatti     } while (ret != 0 && errno == EINTR);
2355c902760fSMarcelo Tosatti 
2356c902760fSMarcelo Tosatti     if (ret != 0) {
23576adc0549SMichael Tokarev         perror(path);
2358c902760fSMarcelo Tosatti         return 0;
2359c902760fSMarcelo Tosatti     }
2360c902760fSMarcelo Tosatti 
2361c902760fSMarcelo Tosatti     if (fs.f_type != HUGETLBFS_MAGIC)
2362c902760fSMarcelo Tosatti         fprintf(stderr, "Warning: path not on HugeTLBFS: %s\n", path);
2363c902760fSMarcelo Tosatti 
2364c902760fSMarcelo Tosatti     return fs.f_bsize;
2365c902760fSMarcelo Tosatti }
2366c902760fSMarcelo Tosatti 
236704b16653SAlex Williamson static void *file_ram_alloc(RAMBlock *block,
236804b16653SAlex Williamson                             ram_addr_t memory,
236904b16653SAlex Williamson                             const char *path)
2370c902760fSMarcelo Tosatti {
2371c902760fSMarcelo Tosatti     char *filename;
2372c902760fSMarcelo Tosatti     void *area;
2373c902760fSMarcelo Tosatti     int fd;
2374c902760fSMarcelo Tosatti #ifdef MAP_POPULATE
2375c902760fSMarcelo Tosatti     int flags;
2376c902760fSMarcelo Tosatti #endif
2377c902760fSMarcelo Tosatti     unsigned long hpagesize;
2378c902760fSMarcelo Tosatti 
2379c902760fSMarcelo Tosatti     hpagesize = gethugepagesize(path);
2380c902760fSMarcelo Tosatti     if (!hpagesize) {
2381c902760fSMarcelo Tosatti         return NULL;
2382c902760fSMarcelo Tosatti     }
2383c902760fSMarcelo Tosatti 
2384c902760fSMarcelo Tosatti     if (memory < hpagesize) {
2385c902760fSMarcelo Tosatti         return NULL;
2386c902760fSMarcelo Tosatti     }
2387c902760fSMarcelo Tosatti 
2388c902760fSMarcelo Tosatti     if (kvm_enabled() && !kvm_has_sync_mmu()) {
2389c902760fSMarcelo Tosatti         fprintf(stderr, "host lacks kvm mmu notifiers, -mem-path unsupported\n");
2390c902760fSMarcelo Tosatti         return NULL;
2391c902760fSMarcelo Tosatti     }
2392c902760fSMarcelo Tosatti 
2393c902760fSMarcelo Tosatti     if (asprintf(&filename, "%s/qemu_back_mem.XXXXXX", path) == -1) {
2394c902760fSMarcelo Tosatti         return NULL;
2395c902760fSMarcelo Tosatti     }
2396c902760fSMarcelo Tosatti 
2397c902760fSMarcelo Tosatti     fd = mkstemp(filename);
2398c902760fSMarcelo Tosatti     if (fd < 0) {
23996adc0549SMichael Tokarev         perror("unable to create backing store for hugepages");
2400c902760fSMarcelo Tosatti         free(filename);
2401c902760fSMarcelo Tosatti         return NULL;
2402c902760fSMarcelo Tosatti     }
2403c902760fSMarcelo Tosatti     unlink(filename);
2404c902760fSMarcelo Tosatti     free(filename);
2405c902760fSMarcelo Tosatti 
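    /* Round the requested size up to a whole number of huge pages; the
       mask arithmetic assumes hpagesize is a power of two, which holds
       for hugetlbfs block sizes. */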
2406c902760fSMarcelo Tosatti     memory = (memory+hpagesize-1) & ~(hpagesize-1);
2407c902760fSMarcelo Tosatti 
2408c902760fSMarcelo Tosatti     /*
2409c902760fSMarcelo Tosatti      * ftruncate is not supported by hugetlbfs in older
2410c902760fSMarcelo Tosatti      * hosts, so don't bother bailing out on errors.
2411c902760fSMarcelo Tosatti      * If anything goes wrong with it under other filesystems,
2412c902760fSMarcelo Tosatti      * mmap will fail.
2413c902760fSMarcelo Tosatti      */
2414c902760fSMarcelo Tosatti     if (ftruncate(fd, memory))
2415c902760fSMarcelo Tosatti         perror("ftruncate");
2416c902760fSMarcelo Tosatti 
2417c902760fSMarcelo Tosatti #ifdef MAP_POPULATE
2418c902760fSMarcelo Tosatti     /* NB: MAP_POPULATE does not exhaustively allocate all physical pages when
2419c902760fSMarcelo Tosatti      * MAP_PRIVATE is requested.  For mem_prealloc we mmap as MAP_SHARED
2420c902760fSMarcelo Tosatti      * to sidestep this quirk.
2421c902760fSMarcelo Tosatti      */
2422c902760fSMarcelo Tosatti     flags = mem_prealloc ? MAP_POPULATE | MAP_SHARED : MAP_PRIVATE;
2423c902760fSMarcelo Tosatti     area = mmap(0, memory, PROT_READ | PROT_WRITE, flags, fd, 0);
2424c902760fSMarcelo Tosatti #else
2425c902760fSMarcelo Tosatti     area = mmap(0, memory, PROT_READ | PROT_WRITE, MAP_PRIVATE, fd, 0);
2426c902760fSMarcelo Tosatti #endif
2427c902760fSMarcelo Tosatti     if (area == MAP_FAILED) {
2428c902760fSMarcelo Tosatti         perror("file_ram_alloc: can't mmap RAM pages");
2429c902760fSMarcelo Tosatti         close(fd);
2430c902760fSMarcelo Tosatti         return NULL;
2431c902760fSMarcelo Tosatti     }
243204b16653SAlex Williamson     block->fd = fd;
2433c902760fSMarcelo Tosatti     return area;
2434c902760fSMarcelo Tosatti }
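/* Editor's note: a hypothetical sketch (compiled out) of the caller's
 * side; in this file the real caller is qemu_ram_alloc_from_ptr() when
 * -mem-path is given, and "/dev/hugepages" is only an example path. */
#if 0
static void *alloc_block_sketch(RAMBlock *block, ram_addr_t size)
{
    void *host = file_ram_alloc(block, size, "/dev/hugepages");
    if (!host) {
        host = qemu_vmalloc(size);          /* fall back to normal pages */
    }
    return host;
}
#endif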
2435c902760fSMarcelo Tosatti #endif
2436c902760fSMarcelo Tosatti 
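/* Best-fit search over the (unsorted) block list: for each block, find
 * where the closest block above it begins, and keep the smallest gap
 * that still fits 'size'.  O(n^2), but n is the number of RAM blocks. */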
2437d17b5288SAlex Williamson static ram_addr_t find_ram_offset(ram_addr_t size)
2438d17b5288SAlex Williamson {
243904b16653SAlex Williamson     RAMBlock *block, *next_block;
24403e837b2cSAlex Williamson     ram_addr_t offset = RAM_ADDR_MAX, mingap = RAM_ADDR_MAX;
244104b16653SAlex Williamson 
244204b16653SAlex Williamson     if (QLIST_EMPTY(&ram_list.blocks))
244304b16653SAlex Williamson         return 0;
244404b16653SAlex Williamson 
244504b16653SAlex Williamson     QLIST_FOREACH(block, &ram_list.blocks, next) {
2446f15fbc4bSAnthony PERARD         ram_addr_t end, next = RAM_ADDR_MAX;
244704b16653SAlex Williamson 
244804b16653SAlex Williamson         end = block->offset + block->length;
244904b16653SAlex Williamson 
245004b16653SAlex Williamson         QLIST_FOREACH(next_block, &ram_list.blocks, next) {
245104b16653SAlex Williamson             if (next_block->offset >= end) {
245204b16653SAlex Williamson                 next = MIN(next, next_block->offset);
245304b16653SAlex Williamson             }
245404b16653SAlex Williamson         }
245504b16653SAlex Williamson         if (next - end >= size && next - end < mingap) {
245604b16653SAlex Williamson             offset = end;
245704b16653SAlex Williamson             mingap = next - end;
245804b16653SAlex Williamson         }
245904b16653SAlex Williamson     }
24603e837b2cSAlex Williamson 
24613e837b2cSAlex Williamson     if (offset == RAM_ADDR_MAX) {
24623e837b2cSAlex Williamson         fprintf(stderr, "Failed to find gap of requested size: %" PRIu64 "\n",
24633e837b2cSAlex Williamson                 (uint64_t)size);
24643e837b2cSAlex Williamson         abort();
24653e837b2cSAlex Williamson     }
24663e837b2cSAlex Williamson 
246704b16653SAlex Williamson     return offset;
246804b16653SAlex Williamson }
246904b16653SAlex Williamson 
247004b16653SAlex Williamson static ram_addr_t last_ram_offset(void)
247104b16653SAlex Williamson {
2472d17b5288SAlex Williamson     RAMBlock *block;
2473d17b5288SAlex Williamson     ram_addr_t last = 0;
2474d17b5288SAlex Williamson 
2475d17b5288SAlex Williamson     QLIST_FOREACH(block, &ram_list.blocks, next)
2476d17b5288SAlex Williamson         last = MAX(last, block->offset + block->length);
2477d17b5288SAlex Williamson 
2478d17b5288SAlex Williamson     return last;
2479d17b5288SAlex Williamson }
2480d17b5288SAlex Williamson 
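/* Attach the migration identifier ("<device-path>/<name>") to the block
 * registered at 'addr'.  idstr is the key used to match RAM blocks
 * across a migration stream, so duplicates abort. */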
2481c5705a77SAvi Kivity void qemu_ram_set_idstr(ram_addr_t addr, const char *name, DeviceState *dev)
248284b89d78SCam Macdonell {
248384b89d78SCam Macdonell     RAMBlock *new_block, *block;
248484b89d78SCam Macdonell 
2485c5705a77SAvi Kivity     new_block = NULL;
2486c5705a77SAvi Kivity     QLIST_FOREACH(block, &ram_list.blocks, next) {
2487c5705a77SAvi Kivity         if (block->offset == addr) {
2488c5705a77SAvi Kivity             new_block = block;
2489c5705a77SAvi Kivity             break;
2490c5705a77SAvi Kivity         }
2491c5705a77SAvi Kivity     }
2492c5705a77SAvi Kivity     assert(new_block);
2493c5705a77SAvi Kivity     assert(!new_block->idstr[0]);
249484b89d78SCam Macdonell 
249509e5ab63SAnthony Liguori     if (dev) {
249609e5ab63SAnthony Liguori         char *id = qdev_get_dev_path(dev);
249784b89d78SCam Macdonell         if (id) {
249884b89d78SCam Macdonell             snprintf(new_block->idstr, sizeof(new_block->idstr), "%s/", id);
24997267c094SAnthony Liguori             g_free(id);
250084b89d78SCam Macdonell         }
250184b89d78SCam Macdonell     }
250284b89d78SCam Macdonell     pstrcat(new_block->idstr, sizeof(new_block->idstr), name);
250384b89d78SCam Macdonell 
250484b89d78SCam Macdonell     QLIST_FOREACH(block, &ram_list.blocks, next) {
2505c5705a77SAvi Kivity         if (block != new_block && !strcmp(block->idstr, new_block->idstr)) {
250684b89d78SCam Macdonell             fprintf(stderr, "RAMBlock \"%s\" already registered, abort!\n",
250784b89d78SCam Macdonell                     new_block->idstr);
250884b89d78SCam Macdonell             abort();
250984b89d78SCam Macdonell         }
251084b89d78SCam Macdonell     }
2511c5705a77SAvi Kivity }
2512c5705a77SAvi Kivity 
2513c5705a77SAvi Kivity ram_addr_t qemu_ram_alloc_from_ptr(ram_addr_t size, void *host,
2514c5705a77SAvi Kivity                                    MemoryRegion *mr)
2515c5705a77SAvi Kivity {
2516c5705a77SAvi Kivity     RAMBlock *new_block;
2517c5705a77SAvi Kivity 
2518c5705a77SAvi Kivity     size = TARGET_PAGE_ALIGN(size);
2519c5705a77SAvi Kivity     new_block = g_malloc0(sizeof(*new_block));
252084b89d78SCam Macdonell 
25217c637366SAvi Kivity     new_block->mr = mr;
2522432d268cSJun Nakajima     new_block->offset = find_ram_offset(size);
25236977dfe6SYoshiaki Tamura     if (host) {
252484b89d78SCam Macdonell         new_block->host = host;
2525cd19cfa2SHuang Ying         new_block->flags |= RAM_PREALLOC_MASK;
25266977dfe6SYoshiaki Tamura     } else {
2527c902760fSMarcelo Tosatti         if (mem_path) {
2528c902760fSMarcelo Tosatti #if defined (__linux__) && !defined(TARGET_S390X)
252904b16653SAlex Williamson             new_block->host = file_ram_alloc(new_block, size, mem_path);
2530618a568dSMarcelo Tosatti             if (!new_block->host) {
2531618a568dSMarcelo Tosatti                 new_block->host = qemu_vmalloc(size);
2532e78815a5SAndreas Färber                 qemu_madvise(new_block->host, size, QEMU_MADV_MERGEABLE);
2533618a568dSMarcelo Tosatti             }
2534c902760fSMarcelo Tosatti #else
2535c902760fSMarcelo Tosatti             fprintf(stderr, "-mem-path option unsupported\n");
2536c902760fSMarcelo Tosatti             exit(1);
2537c902760fSMarcelo Tosatti #endif
2538c902760fSMarcelo Tosatti         } else {
25396b02494dSAlexander Graf #if defined(TARGET_S390X) && defined(CONFIG_KVM)
2540ff83678aSChristian Borntraeger             /* S390 KVM requires the topmost vma of the RAM to be smaller than
2541ff83678aSChristian Borntraeger                a system-defined value, which is at least 256GB. Larger systems
2542ff83678aSChristian Borntraeger                have larger values. We put the guest between the end of the data
2543ff83678aSChristian Borntraeger                segment (system break) and this value. We use 32GB as a base to
2544ff83678aSChristian Borntraeger                have enough room for the system break to grow. */
2545ff83678aSChristian Borntraeger             new_block->host = mmap((void*)0x800000000, size,
2546c902760fSMarcelo Tosatti                                    PROT_EXEC|PROT_READ|PROT_WRITE,
2547ff83678aSChristian Borntraeger                                    MAP_SHARED | MAP_ANONYMOUS | MAP_FIXED, -1, 0);
2548fb8b2735SAlexander Graf             if (new_block->host == MAP_FAILED) {
2549fb8b2735SAlexander Graf                 fprintf(stderr, "Allocating RAM failed\n");
2550fb8b2735SAlexander Graf                 abort();
2551fb8b2735SAlexander Graf             }
25526b02494dSAlexander Graf #else
2553868bb33fSJan Kiszka             if (xen_enabled()) {
2554fce537d4SAvi Kivity                 xen_ram_alloc(new_block->offset, size, mr);
2555432d268cSJun Nakajima             } else {
255694a6b54fSpbrook                 new_block->host = qemu_vmalloc(size);
2557432d268cSJun Nakajima             }
25586b02494dSAlexander Graf #endif
2559e78815a5SAndreas Färber             qemu_madvise(new_block->host, size, QEMU_MADV_MERGEABLE);
2560c902760fSMarcelo Tosatti         }
25616977dfe6SYoshiaki Tamura     }
256294a6b54fSpbrook     new_block->length = size;
256394a6b54fSpbrook 
2564f471a17eSAlex Williamson     QLIST_INSERT_HEAD(&ram_list.blocks, new_block, next);
256594a6b54fSpbrook 
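    /* Grow the global dirty bitmap to cover the new end of RAM and mark
       every page of the new block dirty (0xff = all dirty flags), so it
       is treated as modified from the start. */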
25667267c094SAnthony Liguori     ram_list.phys_dirty = g_realloc(ram_list.phys_dirty,
256704b16653SAlex Williamson                                        last_ram_offset() >> TARGET_PAGE_BITS);
2568d17b5288SAlex Williamson     memset(ram_list.phys_dirty + (new_block->offset >> TARGET_PAGE_BITS),
256994a6b54fSpbrook            0xff, size >> TARGET_PAGE_BITS);
257094a6b54fSpbrook 
25716f0437e8SJan Kiszka     if (kvm_enabled())
25726f0437e8SJan Kiszka         kvm_setup_guest_memory(new_block->host, size);
25736f0437e8SJan Kiszka 
257494a6b54fSpbrook     return new_block->offset;
257594a6b54fSpbrook }
2576e9a1ab19Sbellard 
2577c5705a77SAvi Kivity ram_addr_t qemu_ram_alloc(ram_addr_t size, MemoryRegion *mr)
25786977dfe6SYoshiaki Tamura {
2579c5705a77SAvi Kivity     return qemu_ram_alloc_from_ptr(size, NULL, mr);
25806977dfe6SYoshiaki Tamura }
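/* Editor's note: a rough, hypothetical sketch (compiled out) of the
 * usual call sequence; in reality the memory API (memory_region_init_ram
 * and friends) drives these helpers. */
#if 0
static ram_addr_t init_ram_sketch(MemoryRegion *mr, const char *name,
                                  ram_addr_t size)
{
    ram_addr_t offset = qemu_ram_alloc(size, mr);   /* reserve + allocate */
    qemu_ram_set_idstr(offset, name, NULL);         /* name it for migration */
    return offset;
}
#endif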
25816977dfe6SYoshiaki Tamura 
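/* Unregister a block whose host memory the caller still owns (the
 * qemu_ram_alloc_from_ptr() case): the RAMBlock is unlinked and freed,
 * but block->host is deliberately left untouched. */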
25821f2e98b6SAlex Williamson void qemu_ram_free_from_ptr(ram_addr_t addr)
25831f2e98b6SAlex Williamson {
25841f2e98b6SAlex Williamson     RAMBlock *block;
25851f2e98b6SAlex Williamson 
25861f2e98b6SAlex Williamson     QLIST_FOREACH(block, &ram_list.blocks, next) {
25871f2e98b6SAlex Williamson         if (addr == block->offset) {
25881f2e98b6SAlex Williamson             QLIST_REMOVE(block, next);
25897267c094SAnthony Liguori             g_free(block);
25901f2e98b6SAlex Williamson             return;
25911f2e98b6SAlex Williamson         }
25921f2e98b6SAlex Williamson     }
25931f2e98b6SAlex Williamson }
25941f2e98b6SAlex Williamson 
2595c227f099SAnthony Liguori void qemu_ram_free(ram_addr_t addr)
2596e9a1ab19Sbellard {
259704b16653SAlex Williamson     RAMBlock *block;
259804b16653SAlex Williamson 
259904b16653SAlex Williamson     QLIST_FOREACH(block, &ram_list.blocks, next) {
260004b16653SAlex Williamson         if (addr == block->offset) {
260104b16653SAlex Williamson             QLIST_REMOVE(block, next);
2602cd19cfa2SHuang Ying             if (block->flags & RAM_PREALLOC_MASK) {
2603cd19cfa2SHuang Ying                 ;
2604cd19cfa2SHuang Ying             } else if (mem_path) {
260504b16653SAlex Williamson #if defined (__linux__) && !defined(TARGET_S390X)
260604b16653SAlex Williamson                 if (block->fd) {
260704b16653SAlex Williamson                     munmap(block->host, block->length);
260804b16653SAlex Williamson                     close(block->fd);
260904b16653SAlex Williamson                 } else {
261004b16653SAlex Williamson                     qemu_vfree(block->host);
261104b16653SAlex Williamson                 }
2612fd28aa13SJan Kiszka #else
2613fd28aa13SJan Kiszka                 abort();
261404b16653SAlex Williamson #endif
261504b16653SAlex Williamson             } else {
261604b16653SAlex Williamson #if defined(TARGET_S390X) && defined(CONFIG_KVM)
261704b16653SAlex Williamson                 munmap(block->host, block->length);
261804b16653SAlex Williamson #else
2619868bb33fSJan Kiszka                 if (xen_enabled()) {
2620e41d7c69SJan Kiszka                     xen_invalidate_map_cache_entry(block->host);
2621432d268cSJun Nakajima                 } else {
262204b16653SAlex Williamson                     qemu_vfree(block->host);
2623432d268cSJun Nakajima                 }
262404b16653SAlex Williamson #endif
262504b16653SAlex Williamson             }
26267267c094SAnthony Liguori             g_free(block);
262704b16653SAlex Williamson             return;
262804b16653SAlex Williamson         }
262904b16653SAlex Williamson     }
263004b16653SAlex Williamson 
2631e9a1ab19Sbellard }
2632e9a1ab19Sbellard 
2633cd19cfa2SHuang Ying #ifndef _WIN32
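/* Re-establish a fresh mapping for a range of guest RAM in place (used,
 * for example, to recover from hardware-poisoned pages), mirroring the
 * allocation strategy of qemu_ram_alloc_from_ptr() above. */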
2634cd19cfa2SHuang Ying void qemu_ram_remap(ram_addr_t addr, ram_addr_t length)
2635cd19cfa2SHuang Ying {
2636cd19cfa2SHuang Ying     RAMBlock *block;
2637cd19cfa2SHuang Ying     ram_addr_t offset;
2638cd19cfa2SHuang Ying     int flags;
2639cd19cfa2SHuang Ying     void *area, *vaddr;
2640cd19cfa2SHuang Ying 
2641cd19cfa2SHuang Ying     QLIST_FOREACH(block, &ram_list.blocks, next) {
2642cd19cfa2SHuang Ying         offset = addr - block->offset;
2643cd19cfa2SHuang Ying         if (offset < block->length) {
2644cd19cfa2SHuang Ying             vaddr = block->host + offset;
2645cd19cfa2SHuang Ying             if (block->flags & RAM_PREALLOC_MASK) {
2646cd19cfa2SHuang Ying                 ;
2647cd19cfa2SHuang Ying             } else {
2648cd19cfa2SHuang Ying                 flags = MAP_FIXED;
2649cd19cfa2SHuang Ying                 munmap(vaddr, length);
2650cd19cfa2SHuang Ying                 if (mem_path) {
2651cd19cfa2SHuang Ying #if defined(__linux__) && !defined(TARGET_S390X)
2652cd19cfa2SHuang Ying                     if (block->fd) {
2653cd19cfa2SHuang Ying #ifdef MAP_POPULATE
2654cd19cfa2SHuang Ying                         flags |= mem_prealloc ? MAP_POPULATE | MAP_SHARED :
2655cd19cfa2SHuang Ying                             MAP_PRIVATE;
2656cd19cfa2SHuang Ying #else
2657cd19cfa2SHuang Ying                         flags |= MAP_PRIVATE;
2658cd19cfa2SHuang Ying #endif
2659cd19cfa2SHuang Ying                         area = mmap(vaddr, length, PROT_READ | PROT_WRITE,
2660cd19cfa2SHuang Ying                                     flags, block->fd, offset);
2661cd19cfa2SHuang Ying                     } else {
2662cd19cfa2SHuang Ying                         flags |= MAP_PRIVATE | MAP_ANONYMOUS;
2663cd19cfa2SHuang Ying                         area = mmap(vaddr, length, PROT_READ | PROT_WRITE,
2664cd19cfa2SHuang Ying                                     flags, -1, 0);
2665cd19cfa2SHuang Ying                     }
2666fd28aa13SJan Kiszka #else
2667fd28aa13SJan Kiszka                     abort();
2668cd19cfa2SHuang Ying #endif
2669cd19cfa2SHuang Ying                 } else {
2670cd19cfa2SHuang Ying #if defined(TARGET_S390X) && defined(CONFIG_KVM)
2671cd19cfa2SHuang Ying                     flags |= MAP_SHARED | MAP_ANONYMOUS;
2672cd19cfa2SHuang Ying                     area = mmap(vaddr, length, PROT_EXEC|PROT_READ|PROT_WRITE,
2673cd19cfa2SHuang Ying                                 flags, -1, 0);
2674cd19cfa2SHuang Ying #else
2675cd19cfa2SHuang Ying                     flags |= MAP_PRIVATE | MAP_ANONYMOUS;
2676cd19cfa2SHuang Ying                     area = mmap(vaddr, length, PROT_READ | PROT_WRITE,
2677cd19cfa2SHuang Ying                                 flags, -1, 0);
2678cd19cfa2SHuang Ying #endif
2679cd19cfa2SHuang Ying                 }
2680cd19cfa2SHuang Ying                 if (area != vaddr) {
2681f15fbc4bSAnthony PERARD                     fprintf(stderr, "Could not remap addr: "
2682f15fbc4bSAnthony PERARD                             RAM_ADDR_FMT "@" RAM_ADDR_FMT "\n",
2683cd19cfa2SHuang Ying                             length, addr);
2684cd19cfa2SHuang Ying                     exit(1);
2685cd19cfa2SHuang Ying                 }
2686cd19cfa2SHuang Ying                 qemu_madvise(vaddr, length, QEMU_MADV_MERGEABLE);
2687cd19cfa2SHuang Ying             }
2688cd19cfa2SHuang Ying             return;
2689cd19cfa2SHuang Ying         }
2690cd19cfa2SHuang Ying     }
2691cd19cfa2SHuang Ying }
2692cd19cfa2SHuang Ying #endif /* !_WIN32 */
2693cd19cfa2SHuang Ying 
2694dc828ca1Spbrook /* Return a host pointer to ram allocated with qemu_ram_alloc.
26955579c7f3Spbrook    With the exception of the softmmu code in this file, this should
26965579c7f3Spbrook    only be used for local memory (e.g. video ram) that the device owns,
26975579c7f3Spbrook    and knows it isn't going to access beyond the end of the block.
26985579c7f3Spbrook 
26995579c7f3Spbrook    It should not be used for general purpose DMA.
27005579c7f3Spbrook    Use cpu_physical_memory_map/cpu_physical_memory_rw instead.
27015579c7f3Spbrook  */
2702c227f099SAnthony Liguori void *qemu_get_ram_ptr(ram_addr_t addr)
2703dc828ca1Spbrook {
270494a6b54fSpbrook     RAMBlock *block;
270594a6b54fSpbrook 
2706f471a17eSAlex Williamson     QLIST_FOREACH(block, &ram_list.blocks, next) {
2707f471a17eSAlex Williamson         if (addr - block->offset < block->length) {
27087d82af38SVincent Palatin             /* Move this entry to the start of the list.  */
27097d82af38SVincent Palatin             if (block != QLIST_FIRST(&ram_list.blocks)) {
2710f471a17eSAlex Williamson                 QLIST_REMOVE(block, next);
2711f471a17eSAlex Williamson                 QLIST_INSERT_HEAD(&ram_list.blocks, block, next);
27127d82af38SVincent Palatin             }
2713868bb33fSJan Kiszka             if (xen_enabled()) {
2714432d268cSJun Nakajima                 /* Under Xen we do not map all of guest RAM into QEMU up front:
2715432d268cSJun Nakajima                  * the main RAM block (offset 0) is mapped page by page on
2716712c2b41SStefano Stabellini                  * demand, while any other block is mapped in full on first access.
2717432d268cSJun Nakajima                  */
2718432d268cSJun Nakajima                 if (block->offset == 0) {
2719e41d7c69SJan Kiszka                     return xen_map_cache(addr, 0, 0);
2720432d268cSJun Nakajima                 } else if (block->host == NULL) {
2721e41d7c69SJan Kiszka                     block->host =
2722e41d7c69SJan Kiszka                         xen_map_cache(block->offset, block->length, 1);
2723432d268cSJun Nakajima                 }
2724432d268cSJun Nakajima             }
2725f471a17eSAlex Williamson             return block->host + (addr - block->offset);
272694a6b54fSpbrook         }
2727f471a17eSAlex Williamson     }
2728f471a17eSAlex Williamson 
272994a6b54fSpbrook     fprintf(stderr, "Bad ram offset %" PRIx64 "\n", (uint64_t)addr);
273094a6b54fSpbrook     abort();
2731f471a17eSAlex Williamson 
2732f471a17eSAlex Williamson     return NULL;
2733dc828ca1Spbrook }
2734dc828ca1Spbrook 
2735b2e0a138SMichael S. Tsirkin /* Return a host pointer to ram allocated with qemu_ram_alloc.
2736b2e0a138SMichael S. Tsirkin  * Same as qemu_get_ram_ptr but avoid reordering ramblocks.
2737b2e0a138SMichael S. Tsirkin  */
2738b2e0a138SMichael S. Tsirkin void *qemu_safe_ram_ptr(ram_addr_t addr)
2739b2e0a138SMichael S. Tsirkin {
2740b2e0a138SMichael S. Tsirkin     RAMBlock *block;
2741b2e0a138SMichael S. Tsirkin 
2742b2e0a138SMichael S. Tsirkin     QLIST_FOREACH(block, &ram_list.blocks, next) {
2743b2e0a138SMichael S. Tsirkin         if (addr - block->offset < block->length) {
2744868bb33fSJan Kiszka             if (xen_enabled()) {
2745432d268cSJun Nakajima                 /* Under Xen we do not map all of guest RAM into QEMU up front:
2746432d268cSJun Nakajima                  * the main RAM block (offset 0) is mapped page by page on
2747712c2b41SStefano Stabellini                  * demand, while any other block is mapped in full on first access.
2748432d268cSJun Nakajima                  */
2749432d268cSJun Nakajima                 if (block->offset == 0) {
2750e41d7c69SJan Kiszka                     return xen_map_cache(addr, 0, 0);
2751432d268cSJun Nakajima                 } else if (block->host == NULL) {
2752e41d7c69SJan Kiszka                     block->host =
2753e41d7c69SJan Kiszka                         xen_map_cache(block->offset, block->length, 1);
2754432d268cSJun Nakajima                 }
2755432d268cSJun Nakajima             }
2756b2e0a138SMichael S. Tsirkin             return block->host + (addr - block->offset);
2757b2e0a138SMichael S. Tsirkin         }
2758b2e0a138SMichael S. Tsirkin     }
2759b2e0a138SMichael S. Tsirkin 
2760b2e0a138SMichael S. Tsirkin     fprintf(stderr, "Bad ram offset %" PRIx64 "\n", (uint64_t)addr);
2761b2e0a138SMichael S. Tsirkin     abort();
2762b2e0a138SMichael S. Tsirkin 
2763b2e0a138SMichael S. Tsirkin     return NULL;
2764b2e0a138SMichael S. Tsirkin }
2765b2e0a138SMichael S. Tsirkin 
276638bee5dcSStefano Stabellini /* Return a host pointer to guest's ram. Similar to qemu_get_ram_ptr
276738bee5dcSStefano Stabellini  * but takes a size argument */
27688ab934f9SStefano Stabellini void *qemu_ram_ptr_length(ram_addr_t addr, ram_addr_t *size)
276938bee5dcSStefano Stabellini {
27708ab934f9SStefano Stabellini     if (*size == 0) {
27718ab934f9SStefano Stabellini         return NULL;
27728ab934f9SStefano Stabellini     }
2773868bb33fSJan Kiszka     if (xen_enabled()) {
2774e41d7c69SJan Kiszka         return xen_map_cache(addr, *size, 1);
2775868bb33fSJan Kiszka     } else {
277638bee5dcSStefano Stabellini         RAMBlock *block;
277738bee5dcSStefano Stabellini 
277838bee5dcSStefano Stabellini         QLIST_FOREACH(block, &ram_list.blocks, next) {
277938bee5dcSStefano Stabellini             if (addr - block->offset < block->length) {
278038bee5dcSStefano Stabellini                 if (addr - block->offset + *size > block->length)
278138bee5dcSStefano Stabellini                     *size = block->length - addr + block->offset;
278238bee5dcSStefano Stabellini                 return block->host + (addr - block->offset);
278338bee5dcSStefano Stabellini             }
278438bee5dcSStefano Stabellini         }
278538bee5dcSStefano Stabellini 
278638bee5dcSStefano Stabellini         fprintf(stderr, "Bad ram offset %" PRIx64 "\n", (uint64_t)addr);
278738bee5dcSStefano Stabellini         abort();
278838bee5dcSStefano Stabellini     }
278938bee5dcSStefano Stabellini }
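/* Editor's note: a hypothetical sketch (compiled out) of a bounds-aware
 * caller; qemu_ram_ptr_length() may shrink *size to the end of the
 * containing block, so the result must be re-checked. */
#if 0
static void copy_from_guest_sketch(void *dst, ram_addr_t src, ram_addr_t len)
{
    void *p = qemu_ram_ptr_length(src, &len);   /* len updated in place */
    if (p) {
        memcpy(dst, p, len);
        qemu_put_ram_ptr(p);
    }
}
#endif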
279038bee5dcSStefano Stabellini 
2791050a0ddfSAnthony PERARD void qemu_put_ram_ptr(void *addr)
2792050a0ddfSAnthony PERARD {
2793050a0ddfSAnthony PERARD     trace_qemu_put_ram_ptr(addr);
2794050a0ddfSAnthony PERARD }
2795050a0ddfSAnthony PERARD 
2796e890261fSMarcelo Tosatti int qemu_ram_addr_from_host(void *ptr, ram_addr_t *ram_addr)
27975579c7f3Spbrook {
279894a6b54fSpbrook     RAMBlock *block;
279994a6b54fSpbrook     uint8_t *host = ptr;
280094a6b54fSpbrook 
2801868bb33fSJan Kiszka     if (xen_enabled()) {
2802e41d7c69SJan Kiszka         *ram_addr = xen_ram_addr_from_mapcache(ptr);
2803712c2b41SStefano Stabellini         return 0;
2804712c2b41SStefano Stabellini     }
2805712c2b41SStefano Stabellini 
2806f471a17eSAlex Williamson     QLIST_FOREACH(block, &ram_list.blocks, next) {
2807432d268cSJun Nakajima         /* This case occurs when the block is not yet mapped. */
2808432d268cSJun Nakajima         if (block->host == NULL) {
2809432d268cSJun Nakajima             continue;
2810432d268cSJun Nakajima         }
2811f471a17eSAlex Williamson         if (host - block->host < block->length) {
2812e890261fSMarcelo Tosatti             *ram_addr = block->offset + (host - block->host);
2813e890261fSMarcelo Tosatti             return 0;
281494a6b54fSpbrook         }
2815f471a17eSAlex Williamson     }
2816432d268cSJun Nakajima 
2817e890261fSMarcelo Tosatti     return -1;
2818e890261fSMarcelo Tosatti }
2819f471a17eSAlex Williamson 
2820e890261fSMarcelo Tosatti /* Some of the softmmu routines need to translate from a host pointer
2821e890261fSMarcelo Tosatti    (typically a TLB entry) back to a ram offset.  */
2822e890261fSMarcelo Tosatti ram_addr_t qemu_ram_addr_from_host_nofail(void *ptr)
2823e890261fSMarcelo Tosatti {
2824e890261fSMarcelo Tosatti     ram_addr_t ram_addr;
2825e890261fSMarcelo Tosatti 
2826e890261fSMarcelo Tosatti     if (qemu_ram_addr_from_host(ptr, &ram_addr)) {
282794a6b54fSpbrook         fprintf(stderr, "Bad ram pointer %p\n", ptr);
282894a6b54fSpbrook         abort();
2829e890261fSMarcelo Tosatti     }
2830e890261fSMarcelo Tosatti     return ram_addr;
28315579c7f3Spbrook }
28325579c7f3Spbrook 
28330e0df1e2SAvi Kivity static uint64_t unassigned_mem_read(void *opaque, target_phys_addr_t addr,
28340e0df1e2SAvi Kivity                                     unsigned size)
283533417e70Sbellard {
283667d3b957Spbrook #ifdef DEBUG_UNASSIGNED
2837ab3d1727Sblueswir1     printf("Unassigned mem read " TARGET_FMT_plx "\n", addr);
283867d3b957Spbrook #endif
28395b450407SRichard Henderson #if defined(TARGET_ALPHA) || defined(TARGET_SPARC) || defined(TARGET_MICROBLAZE)
28400e0df1e2SAvi Kivity     cpu_unassigned_access(cpu_single_env, addr, 0, 0, 0, size);
2841e18231a3Sblueswir1 #endif
2842e18231a3Sblueswir1     return 0;
2843e18231a3Sblueswir1 }
2844e18231a3Sblueswir1 
28450e0df1e2SAvi Kivity static void unassigned_mem_write(void *opaque, target_phys_addr_t addr,
28460e0df1e2SAvi Kivity                                  uint64_t val, unsigned size)
2847e18231a3Sblueswir1 {
2848e18231a3Sblueswir1 #ifdef DEBUG_UNASSIGNED
28490e0df1e2SAvi Kivity     printf("Unassigned mem write " TARGET_FMT_plx " = 0x%"PRIx64"\n", addr, val);
2850e18231a3Sblueswir1 #endif
28515b450407SRichard Henderson #if defined(TARGET_ALPHA) || defined(TARGET_SPARC) || defined(TARGET_MICROBLAZE)
28520e0df1e2SAvi Kivity     cpu_unassigned_access(cpu_single_env, addr, 1, 0, 0, size);
2853e18231a3Sblueswir1 #endif
2854e18231a3Sblueswir1 }
2855e18231a3Sblueswir1 
28560e0df1e2SAvi Kivity static const MemoryRegionOps unassigned_mem_ops = {
28570e0df1e2SAvi Kivity     .read = unassigned_mem_read,
28580e0df1e2SAvi Kivity     .write = unassigned_mem_write,
28590e0df1e2SAvi Kivity     .endianness = DEVICE_NATIVE_ENDIAN,
286033417e70Sbellard };
286133417e70Sbellard 
28620e0df1e2SAvi Kivity static uint64_t error_mem_read(void *opaque, target_phys_addr_t addr,
28630e0df1e2SAvi Kivity                                unsigned size)
28640e0df1e2SAvi Kivity {
28650e0df1e2SAvi Kivity     abort();
28660e0df1e2SAvi Kivity }
28670e0df1e2SAvi Kivity 
28680e0df1e2SAvi Kivity static void error_mem_write(void *opaque, target_phys_addr_t addr,
28690e0df1e2SAvi Kivity                             uint64_t value, unsigned size)
28700e0df1e2SAvi Kivity {
28710e0df1e2SAvi Kivity     abort();
28720e0df1e2SAvi Kivity }
28730e0df1e2SAvi Kivity 
28740e0df1e2SAvi Kivity static const MemoryRegionOps error_mem_ops = {
28750e0df1e2SAvi Kivity     .read = error_mem_read,
28760e0df1e2SAvi Kivity     .write = error_mem_write,
28770e0df1e2SAvi Kivity     .endianness = DEVICE_NATIVE_ENDIAN,
287833417e70Sbellard };
287933417e70Sbellard 
28800e0df1e2SAvi Kivity static const MemoryRegionOps rom_mem_ops = {
28810e0df1e2SAvi Kivity     .read = error_mem_read,
28820e0df1e2SAvi Kivity     .write = unassigned_mem_write,
28830e0df1e2SAvi Kivity     .endianness = DEVICE_NATIVE_ENDIAN,
28840e0df1e2SAvi Kivity };
28850e0df1e2SAvi Kivity 
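/* Write handler for RAM pages that still contain translated code: the
 * write goes through to RAM, but any TB derived from the page is
 * invalidated first, and once all code in the page has been flushed the
 * TLB entry is switched back to the fast path. */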
28860e0df1e2SAvi Kivity static void notdirty_mem_write(void *opaque, target_phys_addr_t ram_addr,
28870e0df1e2SAvi Kivity                                uint64_t val, unsigned size)
28881ccde1cbSbellard {
28893a7d929eSbellard     int dirty_flags;
2890f7c11b53SYoshiaki Tamura     dirty_flags = cpu_physical_memory_get_dirty_flags(ram_addr);
28913a7d929eSbellard     if (!(dirty_flags & CODE_DIRTY_FLAG)) {
28923a7d929eSbellard #if !defined(CONFIG_USER_ONLY)
28930e0df1e2SAvi Kivity         tb_invalidate_phys_page_fast(ram_addr, size);
2894f7c11b53SYoshiaki Tamura         dirty_flags = cpu_physical_memory_get_dirty_flags(ram_addr);
28953a7d929eSbellard #endif
28963a7d929eSbellard     }
28970e0df1e2SAvi Kivity     switch (size) {
28980e0df1e2SAvi Kivity     case 1:
28995579c7f3Spbrook         stb_p(qemu_get_ram_ptr(ram_addr), val);
29000e0df1e2SAvi Kivity         break;
29010e0df1e2SAvi Kivity     case 2:
29025579c7f3Spbrook         stw_p(qemu_get_ram_ptr(ram_addr), val);
29030e0df1e2SAvi Kivity         break;
29040e0df1e2SAvi Kivity     case 4:
29055579c7f3Spbrook         stl_p(qemu_get_ram_ptr(ram_addr), val);
29060e0df1e2SAvi Kivity         break;
29070e0df1e2SAvi Kivity     default:
29080e0df1e2SAvi Kivity         abort();
29090e0df1e2SAvi Kivity     }
2910f23db169Sbellard     dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
2911f7c11b53SYoshiaki Tamura     cpu_physical_memory_set_dirty_flags(ram_addr, dirty_flags);
2912f23db169Sbellard     /* we remove the notdirty callback only if the code has been
2913f23db169Sbellard        flushed */
2914f23db169Sbellard     if (dirty_flags == 0xff)
29152e70f6efSpbrook         tlb_set_dirty(cpu_single_env, cpu_single_env->mem_io_vaddr);
29161ccde1cbSbellard }
29171ccde1cbSbellard 
29180e0df1e2SAvi Kivity static const MemoryRegionOps notdirty_mem_ops = {
29190e0df1e2SAvi Kivity     .read = error_mem_read,
29200e0df1e2SAvi Kivity     .write = notdirty_mem_write,
29210e0df1e2SAvi Kivity     .endianness = DEVICE_NATIVE_ENDIAN,
29221ccde1cbSbellard };
29231ccde1cbSbellard 
29240f459d16Spbrook /* Generate a debug exception if a watchpoint has been hit.  */
2925b4051334Saliguori static void check_watchpoint(int offset, int len_mask, int flags)
29260f459d16Spbrook {
29279349b4f9SAndreas Färber     CPUArchState *env = cpu_single_env;
292806d55cc1Saliguori     target_ulong pc, cs_base;
292906d55cc1Saliguori     TranslationBlock *tb;
29300f459d16Spbrook     target_ulong vaddr;
2931a1d1bb31Saliguori     CPUWatchpoint *wp;
293206d55cc1Saliguori     int cpu_flags;
29330f459d16Spbrook 
293406d55cc1Saliguori     if (env->watchpoint_hit) {
293506d55cc1Saliguori         /* We re-entered the check after replacing the TB. Now raise
293706d55cc1Saliguori          * the debug interrupt so that it will trigger after the
293706d55cc1Saliguori          * current instruction. */
293806d55cc1Saliguori         cpu_interrupt(env, CPU_INTERRUPT_DEBUG);
293906d55cc1Saliguori         return;
294006d55cc1Saliguori     }
29412e70f6efSpbrook     vaddr = (env->mem_io_vaddr & TARGET_PAGE_MASK) + offset;
294272cf2d4fSBlue Swirl     QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
2943b4051334Saliguori         if ((vaddr == (wp->vaddr & len_mask) ||
2944b4051334Saliguori              (vaddr & wp->len_mask) == wp->vaddr) && (wp->flags & flags)) {
29456e140f28Saliguori             wp->flags |= BP_WATCHPOINT_HIT;
29466e140f28Saliguori             if (!env->watchpoint_hit) {
2947a1d1bb31Saliguori                 env->watchpoint_hit = wp;
294806d55cc1Saliguori                 tb = tb_find_pc(env->mem_io_pc);
294906d55cc1Saliguori                 if (!tb) {
29506e140f28Saliguori                     cpu_abort(env, "check_watchpoint: could not find TB for "
29516e140f28Saliguori                               "pc=%p", (void *)env->mem_io_pc);
295206d55cc1Saliguori                 }
2953618ba8e6SStefan Weil                 cpu_restore_state(tb, env, env->mem_io_pc);
295406d55cc1Saliguori                 tb_phys_invalidate(tb, -1);
295506d55cc1Saliguori                 if (wp->flags & BP_STOP_BEFORE_ACCESS) {
295606d55cc1Saliguori                     env->exception_index = EXCP_DEBUG;
2957488d6577SMax Filippov                     cpu_loop_exit(env);
295806d55cc1Saliguori                 } else {
295906d55cc1Saliguori                     cpu_get_tb_cpu_state(env, &pc, &cs_base, &cpu_flags);
296006d55cc1Saliguori                     tb_gen_code(env, pc, cs_base, cpu_flags, 1);
296106d55cc1Saliguori                     cpu_resume_from_signal(env, NULL);
29620f459d16Spbrook                 }
2963488d6577SMax Filippov             }
29646e140f28Saliguori         } else {
29656e140f28Saliguori             wp->flags &= ~BP_WATCHPOINT_HIT;
29666e140f28Saliguori         }
29670f459d16Spbrook     }
29680f459d16Spbrook }
29690f459d16Spbrook 
29706658ffb8Spbrook /* Watchpoint access routines.  Watchpoints are inserted using TLB tricks,
29716658ffb8Spbrook    so these check for a hit then pass through to the normal out-of-line
29726658ffb8Spbrook    phys routines.  */
29731ec9b909SAvi Kivity static uint64_t watch_mem_read(void *opaque, target_phys_addr_t addr,
29741ec9b909SAvi Kivity                                unsigned size)
29756658ffb8Spbrook {
29761ec9b909SAvi Kivity     check_watchpoint(addr & ~TARGET_PAGE_MASK, ~(size - 1), BP_MEM_READ);
29771ec9b909SAvi Kivity     switch (size) {
29781ec9b909SAvi Kivity     case 1: return ldub_phys(addr);
29791ec9b909SAvi Kivity     case 2: return lduw_phys(addr);
29801ec9b909SAvi Kivity     case 4: return ldl_phys(addr);
29811ec9b909SAvi Kivity     default: abort();
29821ec9b909SAvi Kivity     }
29836658ffb8Spbrook }
29846658ffb8Spbrook 
29851ec9b909SAvi Kivity static void watch_mem_write(void *opaque, target_phys_addr_t addr,
29861ec9b909SAvi Kivity                             uint64_t val, unsigned size)
29876658ffb8Spbrook {
29881ec9b909SAvi Kivity     check_watchpoint(addr & ~TARGET_PAGE_MASK, ~(size - 1), BP_MEM_WRITE);
29891ec9b909SAvi Kivity     switch (size) {
299067364150SMax Filippov     case 1:
299167364150SMax Filippov         stb_phys(addr, val);
299267364150SMax Filippov         break;
299367364150SMax Filippov     case 2:
299467364150SMax Filippov         stw_phys(addr, val);
299567364150SMax Filippov         break;
299667364150SMax Filippov     case 4:
299767364150SMax Filippov         stl_phys(addr, val);
299867364150SMax Filippov         break;
29991ec9b909SAvi Kivity     default: abort();
30001ec9b909SAvi Kivity     }
30016658ffb8Spbrook }
30026658ffb8Spbrook 
30031ec9b909SAvi Kivity static const MemoryRegionOps watch_mem_ops = {
30041ec9b909SAvi Kivity     .read = watch_mem_read,
30051ec9b909SAvi Kivity     .write = watch_mem_write,
30061ec9b909SAvi Kivity     .endianness = DEVICE_NATIVE_ENDIAN,
30076658ffb8Spbrook };
30086658ffb8Spbrook 
300970c68e44SAvi Kivity static uint64_t subpage_read(void *opaque, target_phys_addr_t addr,
301070c68e44SAvi Kivity                              unsigned len)
3011db7b5426Sblueswir1 {
301270c68e44SAvi Kivity     subpage_t *mmio = opaque;
3013f6405247SRichard Henderson     unsigned int idx = SUBPAGE_IDX(addr);
30145312bd8bSAvi Kivity     MemoryRegionSection *section;
3015db7b5426Sblueswir1 #if defined(DEBUG_SUBPAGE)
3016db7b5426Sblueswir1     printf("%s: subpage %p len %u addr " TARGET_FMT_plx " idx %d\n", __func__,
3017db7b5426Sblueswir1            mmio, len, addr, idx);
3018db7b5426Sblueswir1 #endif
3019db7b5426Sblueswir1 
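    /* Translate the subpage offset back into an offset within the target
       MemoryRegion: shift to the absolute address, then rebase it on the
       section's position inside its region. */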
30205312bd8bSAvi Kivity     section = &phys_sections[mmio->sub_section[idx]];
30215312bd8bSAvi Kivity     addr += mmio->base;
30225312bd8bSAvi Kivity     addr -= section->offset_within_address_space;
30235312bd8bSAvi Kivity     addr += section->offset_within_region;
302437ec01d4SAvi Kivity     return io_mem_read(section->mr, addr, len);
3025db7b5426Sblueswir1 }
3026db7b5426Sblueswir1 
302770c68e44SAvi Kivity static void subpage_write(void *opaque, target_phys_addr_t addr,
302870c68e44SAvi Kivity                           uint64_t value, unsigned len)
3029db7b5426Sblueswir1 {
303070c68e44SAvi Kivity     subpage_t *mmio = opaque;
3031f6405247SRichard Henderson     unsigned int idx = SUBPAGE_IDX(addr);
30325312bd8bSAvi Kivity     MemoryRegionSection *section;
3033db7b5426Sblueswir1 #if defined(DEBUG_SUBPAGE)
303470c68e44SAvi Kivity     printf("%s: subpage %p len %u addr " TARGET_FMT_plx
303570c68e44SAvi Kivity            " idx %d value %"PRIx64"\n",
3036f6405247SRichard Henderson            __func__, mmio, len, addr, idx, value);
3037db7b5426Sblueswir1 #endif
3038f6405247SRichard Henderson 
30395312bd8bSAvi Kivity     section = &phys_sections[mmio->sub_section[idx]];
30405312bd8bSAvi Kivity     addr += mmio->base;
30415312bd8bSAvi Kivity     addr -= section->offset_within_address_space;
30425312bd8bSAvi Kivity     addr += section->offset_within_region;
304337ec01d4SAvi Kivity     io_mem_write(section->mr, addr, value, len);
3044db7b5426Sblueswir1 }
3045db7b5426Sblueswir1 
304670c68e44SAvi Kivity static const MemoryRegionOps subpage_ops = {
304770c68e44SAvi Kivity     .read = subpage_read,
304870c68e44SAvi Kivity     .write = subpage_write,
304970c68e44SAvi Kivity     .endianness = DEVICE_NATIVE_ENDIAN,
3050db7b5426Sblueswir1 };
3051db7b5426Sblueswir1 
3052de712f94SAvi Kivity static uint64_t subpage_ram_read(void *opaque, target_phys_addr_t addr,
3053de712f94SAvi Kivity                                  unsigned size)
305456384e8bSAndreas Färber {
305556384e8bSAndreas Färber     ram_addr_t raddr = addr;
305656384e8bSAndreas Färber     void *ptr = qemu_get_ram_ptr(raddr);
3057de712f94SAvi Kivity     switch (size) {
3058de712f94SAvi Kivity     case 1: return ldub_p(ptr);
3059de712f94SAvi Kivity     case 2: return lduw_p(ptr);
3060de712f94SAvi Kivity     case 4: return ldl_p(ptr);
3061de712f94SAvi Kivity     default: abort();
3062de712f94SAvi Kivity     }
306356384e8bSAndreas Färber }
306456384e8bSAndreas Färber 
3065de712f94SAvi Kivity static void subpage_ram_write(void *opaque, target_phys_addr_t addr,
3066de712f94SAvi Kivity                               uint64_t value, unsigned size)
306756384e8bSAndreas Färber {
306856384e8bSAndreas Färber     ram_addr_t raddr = addr;
306956384e8bSAndreas Färber     void *ptr = qemu_get_ram_ptr(raddr);
3070de712f94SAvi Kivity     switch (size) {
3071de712f94SAvi Kivity     case 1: stb_p(ptr, value); break;
3072de712f94SAvi Kivity     case 2: stw_p(ptr, value); break;
3073de712f94SAvi Kivity     case 4: stl_p(ptr, value); break;
3074de712f94SAvi Kivity     default: abort();
3075de712f94SAvi Kivity     }
307656384e8bSAndreas Färber }
307756384e8bSAndreas Färber 
3078de712f94SAvi Kivity static const MemoryRegionOps subpage_ram_ops = {
3079de712f94SAvi Kivity     .read = subpage_ram_read,
3080de712f94SAvi Kivity     .write = subpage_ram_write,
3081de712f94SAvi Kivity     .endianness = DEVICE_NATIVE_ENDIAN,
308256384e8bSAndreas Färber };
308356384e8bSAndreas Färber 
3084c227f099SAnthony Liguori static int subpage_register (subpage_t *mmio, uint32_t start, uint32_t end,
30855312bd8bSAvi Kivity                              uint16_t section)
3086db7b5426Sblueswir1 {
3087db7b5426Sblueswir1     int idx, eidx;
3088db7b5426Sblueswir1 
3089db7b5426Sblueswir1     if (start >= TARGET_PAGE_SIZE || end >= TARGET_PAGE_SIZE)
3090db7b5426Sblueswir1         return -1;
3091db7b5426Sblueswir1     idx = SUBPAGE_IDX(start);
3092db7b5426Sblueswir1     eidx = SUBPAGE_IDX(end);
3093db7b5426Sblueswir1 #if defined(DEBUG_SUBPAGE)
30940bf9e31aSBlue Swirl     printf("%s: %p start %08x end %08x idx %08x eidx %08x section %u\n", __func__,
3095db7b5426Sblueswir1            mmio, start, end, idx, eidx, section);
3096db7b5426Sblueswir1 #endif
30975312bd8bSAvi Kivity     if (memory_region_is_ram(phys_sections[section].mr)) {
30985312bd8bSAvi Kivity         MemoryRegionSection new_section = phys_sections[section];
30995312bd8bSAvi Kivity         new_section.mr = &io_mem_subpage_ram;
31005312bd8bSAvi Kivity         section = phys_section_add(&new_section);
310156384e8bSAndreas Färber     }
3102db7b5426Sblueswir1     for (; idx <= eidx; idx++) {
31035312bd8bSAvi Kivity         mmio->sub_section[idx] = section;
3104db7b5426Sblueswir1     }
3105db7b5426Sblueswir1 
3106db7b5426Sblueswir1     return 0;
3107db7b5426Sblueswir1 }
3108db7b5426Sblueswir1 
31090f0cb164SAvi Kivity static subpage_t *subpage_init(target_phys_addr_t base)
3110db7b5426Sblueswir1 {
3111c227f099SAnthony Liguori     subpage_t *mmio;
3112db7b5426Sblueswir1 
31137267c094SAnthony Liguori     mmio = g_malloc0(sizeof(subpage_t));
31141eec614bSaliguori 
3115db7b5426Sblueswir1     mmio->base = base;
311670c68e44SAvi Kivity     memory_region_init_io(&mmio->iomem, &subpage_ops, mmio,
311770c68e44SAvi Kivity                           "subpage", TARGET_PAGE_SIZE);
3118b3b00c78SAvi Kivity     mmio->iomem.subpage = true;
3119db7b5426Sblueswir1 #if defined(DEBUG_SUBPAGE)
3120db7b5426Sblueswir1     printf("%s: %p base " TARGET_FMT_plx " len %08x\n", __func__,
3121db7b5426Sblueswir1            mmio, base, TARGET_PAGE_SIZE);
3122db7b5426Sblueswir1 #endif
31230f0cb164SAvi Kivity     subpage_register(mmio, 0, TARGET_PAGE_SIZE-1, phys_section_unassigned);
3124db7b5426Sblueswir1 
3125db7b5426Sblueswir1     return mmio;
3126db7b5426Sblueswir1 }
3127db7b5426Sblueswir1 
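/* Wrap a bare MemoryRegion in a catch-all section spanning the whole
 * address space, so the fixed regions (unassigned, notdirty, rom, watch)
 * get compact uint16_t section indices like everything else. */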
31285312bd8bSAvi Kivity static uint16_t dummy_section(MemoryRegion *mr)
31295312bd8bSAvi Kivity {
31305312bd8bSAvi Kivity     MemoryRegionSection section = {
31315312bd8bSAvi Kivity         .mr = mr,
31325312bd8bSAvi Kivity         .offset_within_address_space = 0,
31335312bd8bSAvi Kivity         .offset_within_region = 0,
31345312bd8bSAvi Kivity         .size = UINT64_MAX,
31355312bd8bSAvi Kivity     };
31365312bd8bSAvi Kivity 
31375312bd8bSAvi Kivity     return phys_section_add(&section);
31385312bd8bSAvi Kivity }
31395312bd8bSAvi Kivity 
314037ec01d4SAvi Kivity MemoryRegion *iotlb_to_region(target_phys_addr_t index)
3141aa102231SAvi Kivity {
314237ec01d4SAvi Kivity     return phys_sections[index & ~TARGET_PAGE_MASK].mr;
3143aa102231SAvi Kivity }
3144aa102231SAvi Kivity 
3145e9179ce1SAvi Kivity static void io_mem_init(void)
3146e9179ce1SAvi Kivity {
31470e0df1e2SAvi Kivity     memory_region_init_io(&io_mem_ram, &error_mem_ops, NULL, "ram", UINT64_MAX);
31480e0df1e2SAvi Kivity     memory_region_init_io(&io_mem_rom, &rom_mem_ops, NULL, "rom", UINT64_MAX);
31490e0df1e2SAvi Kivity     memory_region_init_io(&io_mem_unassigned, &unassigned_mem_ops, NULL,
31500e0df1e2SAvi Kivity                           "unassigned", UINT64_MAX);
31510e0df1e2SAvi Kivity     memory_region_init_io(&io_mem_notdirty, &notdirty_mem_ops, NULL,
31520e0df1e2SAvi Kivity                           "notdirty", UINT64_MAX);
3153de712f94SAvi Kivity     memory_region_init_io(&io_mem_subpage_ram, &subpage_ram_ops, NULL,
3154de712f94SAvi Kivity                           "subpage-ram", UINT64_MAX);
31551ec9b909SAvi Kivity     memory_region_init_io(&io_mem_watch, &watch_mem_ops, NULL,
31561ec9b909SAvi Kivity                           "watch", UINT64_MAX);
3157e9179ce1SAvi Kivity }
3158e9179ce1SAvi Kivity 
315950c1e149SAvi Kivity static void core_begin(MemoryListener *listener)
316050c1e149SAvi Kivity {
316154688b1eSAvi Kivity     destroy_all_mappings();
31625312bd8bSAvi Kivity     phys_sections_clear();
3163c19e8800SAvi Kivity     phys_map.ptr = PHYS_MAP_NODE_NIL;
31645312bd8bSAvi Kivity     phys_section_unassigned = dummy_section(&io_mem_unassigned);
3165aa102231SAvi Kivity     phys_section_notdirty = dummy_section(&io_mem_notdirty);
3166aa102231SAvi Kivity     phys_section_rom = dummy_section(&io_mem_rom);
3167aa102231SAvi Kivity     phys_section_watch = dummy_section(&io_mem_watch);
316850c1e149SAvi Kivity }
316950c1e149SAvi Kivity 
317050c1e149SAvi Kivity static void core_commit(MemoryListener *listener)
317150c1e149SAvi Kivity {
31729349b4f9SAndreas Färber     CPUArchState *env;
3173117712c3SAvi Kivity 
3174117712c3SAvi Kivity     /* since each CPU stores ram addresses in its TLB cache, we must
3175117712c3SAvi Kivity        reset the modified entries */
3176117712c3SAvi Kivity     /* XXX: slow ! */
3177117712c3SAvi Kivity     for(env = first_cpu; env != NULL; env = env->next_cpu) {
3178117712c3SAvi Kivity         tlb_flush(env, 1);
3179117712c3SAvi Kivity     }
318050c1e149SAvi Kivity }
318150c1e149SAvi Kivity 
318293632747SAvi Kivity static void core_region_add(MemoryListener *listener,
318393632747SAvi Kivity                             MemoryRegionSection *section)
318493632747SAvi Kivity {
318593632747SAvi Kivity     cpu_register_physical_memory_log(section, section->readonly);
318693632747SAvi Kivity }
318793632747SAvi Kivity 
318893632747SAvi Kivity static void core_region_del(MemoryListener *listener,
318993632747SAvi Kivity                             MemoryRegionSection *section)
319093632747SAvi Kivity {
319193632747SAvi Kivity }
319293632747SAvi Kivity 
319350c1e149SAvi Kivity static void core_region_nop(MemoryListener *listener,
319450c1e149SAvi Kivity                             MemoryRegionSection *section)
319550c1e149SAvi Kivity {
319654688b1eSAvi Kivity     cpu_register_physical_memory_log(section, section->readonly);
319750c1e149SAvi Kivity }
319850c1e149SAvi Kivity 
319993632747SAvi Kivity static void core_log_start(MemoryListener *listener,
320093632747SAvi Kivity                            MemoryRegionSection *section)
320193632747SAvi Kivity {
320293632747SAvi Kivity }
320393632747SAvi Kivity 
320493632747SAvi Kivity static void core_log_stop(MemoryListener *listener,
320593632747SAvi Kivity                           MemoryRegionSection *section)
320693632747SAvi Kivity {
320793632747SAvi Kivity }
320893632747SAvi Kivity 
320993632747SAvi Kivity static void core_log_sync(MemoryListener *listener,
321093632747SAvi Kivity                           MemoryRegionSection *section)
321193632747SAvi Kivity {
321293632747SAvi Kivity }
321393632747SAvi Kivity 
321493632747SAvi Kivity static void core_log_global_start(MemoryListener *listener)
321593632747SAvi Kivity {
321693632747SAvi Kivity     cpu_physical_memory_set_dirty_tracking(1);
321793632747SAvi Kivity }
321893632747SAvi Kivity 
321993632747SAvi Kivity static void core_log_global_stop(MemoryListener *listener)
322093632747SAvi Kivity {
322193632747SAvi Kivity     cpu_physical_memory_set_dirty_tracking(0);
322293632747SAvi Kivity }
322393632747SAvi Kivity 
322493632747SAvi Kivity static void core_eventfd_add(MemoryListener *listener,
322593632747SAvi Kivity                              MemoryRegionSection *section,
322693632747SAvi Kivity                              bool match_data, uint64_t data, int fd)
322793632747SAvi Kivity {
322893632747SAvi Kivity }
322993632747SAvi Kivity 
323093632747SAvi Kivity static void core_eventfd_del(MemoryListener *listener,
323193632747SAvi Kivity                              MemoryRegionSection *section,
323293632747SAvi Kivity                              bool match_data, uint64_t data, int fd)
323393632747SAvi Kivity {
323493632747SAvi Kivity }
323593632747SAvi Kivity 
323650c1e149SAvi Kivity static void io_begin(MemoryListener *listener)
323750c1e149SAvi Kivity {
323850c1e149SAvi Kivity }
323950c1e149SAvi Kivity 
324050c1e149SAvi Kivity static void io_commit(MemoryListener *listener)
324150c1e149SAvi Kivity {
324250c1e149SAvi Kivity }
324350c1e149SAvi Kivity 
32444855d41aSAvi Kivity static void io_region_add(MemoryListener *listener,
32454855d41aSAvi Kivity                           MemoryRegionSection *section)
32464855d41aSAvi Kivity {
3247a2d33521SAvi Kivity     MemoryRegionIORange *mrio = g_new(MemoryRegionIORange, 1);
3248a2d33521SAvi Kivity 
3249a2d33521SAvi Kivity     mrio->mr = section->mr;
3250a2d33521SAvi Kivity     mrio->offset = section->offset_within_region;
3251a2d33521SAvi Kivity     iorange_init(&mrio->iorange, &memory_region_iorange_ops,
32524855d41aSAvi Kivity                  section->offset_within_address_space, section->size);
3253a2d33521SAvi Kivity     ioport_register(&mrio->iorange);
32544855d41aSAvi Kivity }
32554855d41aSAvi Kivity 
32564855d41aSAvi Kivity static void io_region_del(MemoryListener *listener,
32574855d41aSAvi Kivity                           MemoryRegionSection *section)
32584855d41aSAvi Kivity {
32594855d41aSAvi Kivity     isa_unassign_ioport(section->offset_within_address_space, section->size);
32604855d41aSAvi Kivity }
32614855d41aSAvi Kivity 
326250c1e149SAvi Kivity static void io_region_nop(MemoryListener *listener,
326350c1e149SAvi Kivity                           MemoryRegionSection *section)
326450c1e149SAvi Kivity {
326550c1e149SAvi Kivity }
326650c1e149SAvi Kivity 
32674855d41aSAvi Kivity static void io_log_start(MemoryListener *listener,
32684855d41aSAvi Kivity                          MemoryRegionSection *section)
32694855d41aSAvi Kivity {
32704855d41aSAvi Kivity }
32714855d41aSAvi Kivity 
32724855d41aSAvi Kivity static void io_log_stop(MemoryListener *listener,
32734855d41aSAvi Kivity                         MemoryRegionSection *section)
32744855d41aSAvi Kivity {
32754855d41aSAvi Kivity }
32764855d41aSAvi Kivity 
32774855d41aSAvi Kivity static void io_log_sync(MemoryListener *listener,
32784855d41aSAvi Kivity                         MemoryRegionSection *section)
32794855d41aSAvi Kivity {
32804855d41aSAvi Kivity }
32814855d41aSAvi Kivity 
32824855d41aSAvi Kivity static void io_log_global_start(MemoryListener *listener)
32834855d41aSAvi Kivity {
32844855d41aSAvi Kivity }
32854855d41aSAvi Kivity 
32864855d41aSAvi Kivity static void io_log_global_stop(MemoryListener *listener)
32874855d41aSAvi Kivity {
32884855d41aSAvi Kivity }
32894855d41aSAvi Kivity 
32904855d41aSAvi Kivity static void io_eventfd_add(MemoryListener *listener,
32914855d41aSAvi Kivity                            MemoryRegionSection *section,
32924855d41aSAvi Kivity                            bool match_data, uint64_t data, int fd)
32934855d41aSAvi Kivity {
32944855d41aSAvi Kivity }
32954855d41aSAvi Kivity 
32964855d41aSAvi Kivity static void io_eventfd_del(MemoryListener *listener,
32974855d41aSAvi Kivity                            MemoryRegionSection *section,
32984855d41aSAvi Kivity                            bool match_data, uint64_t data, int fd)
32994855d41aSAvi Kivity {
33004855d41aSAvi Kivity }
33014855d41aSAvi Kivity 
330293632747SAvi Kivity static MemoryListener core_memory_listener = {
330350c1e149SAvi Kivity     .begin = core_begin,
330450c1e149SAvi Kivity     .commit = core_commit,
330593632747SAvi Kivity     .region_add = core_region_add,
330693632747SAvi Kivity     .region_del = core_region_del,
330750c1e149SAvi Kivity     .region_nop = core_region_nop,
330893632747SAvi Kivity     .log_start = core_log_start,
330993632747SAvi Kivity     .log_stop = core_log_stop,
331093632747SAvi Kivity     .log_sync = core_log_sync,
331193632747SAvi Kivity     .log_global_start = core_log_global_start,
331293632747SAvi Kivity     .log_global_stop = core_log_global_stop,
331393632747SAvi Kivity     .eventfd_add = core_eventfd_add,
331493632747SAvi Kivity     .eventfd_del = core_eventfd_del,
331593632747SAvi Kivity     .priority = 0,
331693632747SAvi Kivity };
331793632747SAvi Kivity 
33184855d41aSAvi Kivity static MemoryListener io_memory_listener = {
331950c1e149SAvi Kivity     .begin = io_begin,
332050c1e149SAvi Kivity     .commit = io_commit,
33214855d41aSAvi Kivity     .region_add = io_region_add,
33224855d41aSAvi Kivity     .region_del = io_region_del,
332350c1e149SAvi Kivity     .region_nop = io_region_nop,
33244855d41aSAvi Kivity     .log_start = io_log_start,
33254855d41aSAvi Kivity     .log_stop = io_log_stop,
33264855d41aSAvi Kivity     .log_sync = io_log_sync,
33274855d41aSAvi Kivity     .log_global_start = io_log_global_start,
33284855d41aSAvi Kivity     .log_global_stop = io_log_global_stop,
33294855d41aSAvi Kivity     .eventfd_add = io_eventfd_add,
33304855d41aSAvi Kivity     .eventfd_del = io_eventfd_del,
33314855d41aSAvi Kivity     .priority = 0,
33324855d41aSAvi Kivity };
33334855d41aSAvi Kivity 
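/*
 * Illustrative sketch, not in the original: because core_begin() wipes the
 * whole physical map, unchanged sections come back through region_nop() on
 * every transaction, not only newly added ones.  A subsystem wanting its
 * own view of an address space registers a listener the same way; the empty
 * io_* stubs above suggest that at this revision every callback must be
 * supplied.  All names below are hypothetical:
 *
 *     static MemoryListener my_listener = {
 *         .begin = my_begin,
 *         .commit = my_commit,
 *         .region_add = my_region_add,
 *         .region_del = my_region_del,
 *         .region_nop = my_region_nop,
 *         // ...log_* and eventfd_* stubs, as in io_memory_listener...
 *         .priority = 10,
 *     };
 *
 *     memory_listener_register(&my_listener, system_memory);
 */
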
333462152b8aSAvi Kivity static void memory_map_init(void)
333562152b8aSAvi Kivity {
33367267c094SAnthony Liguori     system_memory = g_malloc(sizeof(*system_memory));
33378417cebfSAvi Kivity     memory_region_init(system_memory, "system", INT64_MAX);
333862152b8aSAvi Kivity     set_system_memory_map(system_memory);
3339309cb471SAvi Kivity 
33407267c094SAnthony Liguori     system_io = g_malloc(sizeof(*system_io));
3341309cb471SAvi Kivity     memory_region_init(system_io, "io", 65536);
3342309cb471SAvi Kivity     set_system_io_map(system_io);
334393632747SAvi Kivity 
33444855d41aSAvi Kivity     memory_listener_register(&core_memory_listener, system_memory);
33454855d41aSAvi Kivity     memory_listener_register(&io_memory_listener, system_io);
334662152b8aSAvi Kivity }
334762152b8aSAvi Kivity 
334862152b8aSAvi Kivity MemoryRegion *get_system_memory(void)
334962152b8aSAvi Kivity {
335062152b8aSAvi Kivity     return system_memory;
335162152b8aSAvi Kivity }
335262152b8aSAvi Kivity 
3353309cb471SAvi Kivity MemoryRegion *get_system_io(void)
3354309cb471SAvi Kivity {
3355309cb471SAvi Kivity     return system_io;
3356309cb471SAvi Kivity }
3357309cb471SAvi Kivity 
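/*
 * Illustrative sketch, not in the original: board code populates the
 * regions created in memory_map_init() by adding subregions, e.g. main RAM
 * at a board-specific base address.  This assumes the three-argument
 * memory_region_init_ram() of this era; "board.ram", ram_size and the base
 * address are hypothetical:
 *
 *     MemoryRegion *ram = g_malloc(sizeof(*ram));
 *     memory_region_init_ram(ram, "board.ram", ram_size);
 *     memory_region_add_subregion(get_system_memory(), 0x00000000, ram);
 */
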
3358e2eef170Spbrook #endif /* !defined(CONFIG_USER_ONLY) */
3359e2eef170Spbrook 
336013eb76e0Sbellard /* physical memory access (slow version, mainly for debug) */
336113eb76e0Sbellard #if defined(CONFIG_USER_ONLY)
33629349b4f9SAndreas Färber int cpu_memory_rw_debug(CPUArchState *env, target_ulong addr,
3363a68fe89cSPaul Brook                         uint8_t *buf, int len, int is_write)
336413eb76e0Sbellard {
336513eb76e0Sbellard     int l, flags;
336613eb76e0Sbellard     target_ulong page;
336753a5960aSpbrook     void *p;
336813eb76e0Sbellard 
336913eb76e0Sbellard     while (len > 0) {
337013eb76e0Sbellard         page = addr & TARGET_PAGE_MASK;
337113eb76e0Sbellard         l = (page + TARGET_PAGE_SIZE) - addr;
337213eb76e0Sbellard         if (l > len)
337313eb76e0Sbellard             l = len;
337413eb76e0Sbellard         flags = page_get_flags(page);
337513eb76e0Sbellard         if (!(flags & PAGE_VALID))
3376a68fe89cSPaul Brook             return -1;
337713eb76e0Sbellard         if (is_write) {
337813eb76e0Sbellard             if (!(flags & PAGE_WRITE))
3379a68fe89cSPaul Brook                 return -1;
3380579a97f7Sbellard             /* XXX: this code should not depend on lock_user */
338172fb7daaSaurel32             if (!(p = lock_user(VERIFY_WRITE, addr, l, 0)))
3382a68fe89cSPaul Brook                 return -1;
338372fb7daaSaurel32             memcpy(p, buf, l);
338472fb7daaSaurel32             unlock_user(p, addr, l);
338513eb76e0Sbellard         } else {
338613eb76e0Sbellard             if (!(flags & PAGE_READ))
3387a68fe89cSPaul Brook                 return -1;
3388579a97f7Sbellard             /* XXX: this code should not depend on lock_user */
338972fb7daaSaurel32             if (!(p = lock_user(VERIFY_READ, addr, l, 1)))
3390a68fe89cSPaul Brook                 return -1;
339172fb7daaSaurel32             memcpy(buf, p, l);
33925b257578Saurel32             unlock_user(p, addr, 0);
339313eb76e0Sbellard         }
339413eb76e0Sbellard         len -= l;
339513eb76e0Sbellard         buf += l;
339613eb76e0Sbellard         addr += l;
339713eb76e0Sbellard     }
3398a68fe89cSPaul Brook     return 0;
339913eb76e0Sbellard }
34008df1cd07Sbellard 
340113eb76e0Sbellard #else
3402c227f099SAnthony Liguori void cpu_physical_memory_rw(target_phys_addr_t addr, uint8_t *buf,
340313eb76e0Sbellard                             int len, int is_write)
340413eb76e0Sbellard {
340537ec01d4SAvi Kivity     int l;
340613eb76e0Sbellard     uint8_t *ptr;
340713eb76e0Sbellard     uint32_t val;
3408c227f099SAnthony Liguori     target_phys_addr_t page;
3409f3705d53SAvi Kivity     MemoryRegionSection *section;
341013eb76e0Sbellard 
341113eb76e0Sbellard     while (len > 0) {
341213eb76e0Sbellard         page = addr & TARGET_PAGE_MASK;
341313eb76e0Sbellard         l = (page + TARGET_PAGE_SIZE) - addr;
341413eb76e0Sbellard         if (l > len)
341513eb76e0Sbellard             l = len;
341606ef3525SAvi Kivity         section = phys_page_find(page >> TARGET_PAGE_BITS);
341713eb76e0Sbellard 
341813eb76e0Sbellard         if (is_write) {
3419f3705d53SAvi Kivity             if (!memory_region_is_ram(section->mr)) {
3420f1f6e3b8SAvi Kivity                 target_phys_addr_t addr1;
3421cc5bea60SBlue Swirl                 addr1 = memory_region_section_addr(section, addr);
34226a00d601Sbellard                 /* XXX: could force cpu_single_env to NULL to avoid
34236a00d601Sbellard                    potential bugs */
34246c2934dbSaurel32                 if (l >= 4 && ((addr1 & 3) == 0)) {
34251c213d19Sbellard                     /* 32 bit write access */
3426c27004ecSbellard                     val = ldl_p(buf);
342737ec01d4SAvi Kivity                     io_mem_write(section->mr, addr1, val, 4);
342813eb76e0Sbellard                     l = 4;
34296c2934dbSaurel32                 } else if (l >= 2 && ((addr1 & 1) == 0)) {
34301c213d19Sbellard                     /* 16 bit write access */
3431c27004ecSbellard                     val = lduw_p(buf);
343237ec01d4SAvi Kivity                     io_mem_write(section->mr, addr1, val, 2);
343313eb76e0Sbellard                     l = 2;
343413eb76e0Sbellard                 } else {
34351c213d19Sbellard                     /* 8 bit write access */
3436c27004ecSbellard                     val = ldub_p(buf);
343737ec01d4SAvi Kivity                     io_mem_write(section->mr, addr1, val, 1);
343813eb76e0Sbellard                     l = 1;
343913eb76e0Sbellard                 }
3440f3705d53SAvi Kivity             } else if (!section->readonly) {
34418ca5692dSAnthony PERARD                 ram_addr_t addr1;
3442f3705d53SAvi Kivity                 addr1 = memory_region_get_ram_addr(section->mr)
3443cc5bea60SBlue Swirl                     + memory_region_section_addr(section, addr);
344413eb76e0Sbellard                 /* RAM case */
34455579c7f3Spbrook                 ptr = qemu_get_ram_ptr(addr1);
344613eb76e0Sbellard                 memcpy(ptr, buf, l);
34473a7d929eSbellard                 if (!cpu_physical_memory_is_dirty(addr1)) {
3448b448f2f3Sbellard                     /* invalidate code */
3449b448f2f3Sbellard                     tb_invalidate_phys_page_range(addr1, addr1 + l, 0);
3450b448f2f3Sbellard                     /* set dirty bit */
3451f7c11b53SYoshiaki Tamura                     cpu_physical_memory_set_dirty_flags(
3452f7c11b53SYoshiaki Tamura                         addr1, (0xff & ~CODE_DIRTY_FLAG));
345313eb76e0Sbellard                 }
3454050a0ddfSAnthony PERARD                 qemu_put_ram_ptr(ptr);
34553a7d929eSbellard             }
345613eb76e0Sbellard         } else {
3457cc5bea60SBlue Swirl             if (!(memory_region_is_ram(section->mr) ||
3458cc5bea60SBlue Swirl                   memory_region_is_romd(section->mr))) {
3459f1f6e3b8SAvi Kivity                 target_phys_addr_t addr1;
346013eb76e0Sbellard                 /* I/O case */
3461cc5bea60SBlue Swirl                 addr1 = memory_region_section_addr(section, addr);
34626c2934dbSaurel32                 if (l >= 4 && ((addr1 & 3) == 0)) {
346313eb76e0Sbellard                     /* 32 bit read access */
346437ec01d4SAvi Kivity                     val = io_mem_read(section->mr, addr1, 4);
3465c27004ecSbellard                     stl_p(buf, val);
346613eb76e0Sbellard                     l = 4;
34676c2934dbSaurel32                 } else if (l >= 2 && ((addr1 & 1) == 0)) {
346813eb76e0Sbellard                     /* 16 bit read access */
346937ec01d4SAvi Kivity                     val = io_mem_read(section->mr, addr1, 2);
3470c27004ecSbellard                     stw_p(buf, val);
347113eb76e0Sbellard                     l = 2;
347213eb76e0Sbellard                 } else {
34731c213d19Sbellard                     /* 8 bit read access */
347437ec01d4SAvi Kivity                     val = io_mem_read(section->mr, addr1, 1);
3475c27004ecSbellard                     stb_p(buf, val);
347613eb76e0Sbellard                     l = 1;
347713eb76e0Sbellard                 }
347813eb76e0Sbellard             } else {
347913eb76e0Sbellard                 /* RAM case */
34800a1b357fSAnthony PERARD                 ptr = qemu_get_ram_ptr(section->mr->ram_addr
3481cc5bea60SBlue Swirl                                        + memory_region_section_addr(section,
3482cc5bea60SBlue Swirl                                                                     addr));
3483f3705d53SAvi Kivity                 memcpy(buf, ptr, l);
3484050a0ddfSAnthony PERARD                 qemu_put_ram_ptr(ptr);
348513eb76e0Sbellard             }
348613eb76e0Sbellard         }
348713eb76e0Sbellard         len -= l;
348813eb76e0Sbellard         buf += l;
348913eb76e0Sbellard         addr += l;
349013eb76e0Sbellard     }
349113eb76e0Sbellard }
34928df1cd07Sbellard 
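/*
 * Illustrative sketch, not in the original: most callers go through the
 * cpu_physical_memory_read()/write() wrappers, thin inlines over the
 * function above (the bounce-buffer code below uses the read wrapper).
 * "desc_pa" and the 16-byte descriptor are hypothetical:
 *
 *     uint8_t desc[16];
 *     cpu_physical_memory_read(desc_pa, desc, sizeof(desc));
 *     // ...modify desc in place...
 *     cpu_physical_memory_write(desc_pa, desc, sizeof(desc));
 */
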
3493d0ecd2aaSbellard /* used for ROM loading: can write to RAM and ROM */
3494c227f099SAnthony Liguori void cpu_physical_memory_write_rom(target_phys_addr_t addr,
3495d0ecd2aaSbellard                                    const uint8_t *buf, int len)
3496d0ecd2aaSbellard {
3497d0ecd2aaSbellard     int l;
3498d0ecd2aaSbellard     uint8_t *ptr;
3499c227f099SAnthony Liguori     target_phys_addr_t page;
3500f3705d53SAvi Kivity     MemoryRegionSection *section;
3501d0ecd2aaSbellard 
3502d0ecd2aaSbellard     while (len > 0) {
3503d0ecd2aaSbellard         page = addr & TARGET_PAGE_MASK;
3504d0ecd2aaSbellard         l = (page + TARGET_PAGE_SIZE) - addr;
3505d0ecd2aaSbellard         if (l > len)
3506d0ecd2aaSbellard             l = len;
350706ef3525SAvi Kivity         section = phys_page_find(page >> TARGET_PAGE_BITS);
3508d0ecd2aaSbellard 
3509cc5bea60SBlue Swirl         if (!(memory_region_is_ram(section->mr) ||
3510cc5bea60SBlue Swirl               memory_region_is_romd(section->mr))) {
3511d0ecd2aaSbellard             /* do nothing */
3512d0ecd2aaSbellard         } else {
3513d0ecd2aaSbellard             unsigned long addr1;
3514f3705d53SAvi Kivity             addr1 = memory_region_get_ram_addr(section->mr)
3515cc5bea60SBlue Swirl                 + memory_region_section_addr(section, addr);
3516d0ecd2aaSbellard             /* ROM/RAM case */
35175579c7f3Spbrook             ptr = qemu_get_ram_ptr(addr1);
3518d0ecd2aaSbellard             memcpy(ptr, buf, l);
3519050a0ddfSAnthony PERARD             qemu_put_ram_ptr(ptr);
3520d0ecd2aaSbellard         }
3521d0ecd2aaSbellard         len -= l;
3522d0ecd2aaSbellard         buf += l;
3523d0ecd2aaSbellard         addr += l;
3524d0ecd2aaSbellard     }
3525d0ecd2aaSbellard }
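
/*
 * Illustrative sketch, not in the original: firmware loaders use the ROM
 * variant so the copy succeeds even when the destination is read-only;
 * "blob", "size" and the base address are hypothetical:
 *
 *     cpu_physical_memory_write_rom(0xfffc0000, blob, size);
 */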
3526d0ecd2aaSbellard 
35276d16c2f8Saliguori typedef struct {
35286d16c2f8Saliguori     void *buffer;
3529c227f099SAnthony Liguori     target_phys_addr_t addr;
3530c227f099SAnthony Liguori     target_phys_addr_t len;
35316d16c2f8Saliguori } BounceBuffer;
35326d16c2f8Saliguori 
35336d16c2f8Saliguori static BounceBuffer bounce;
35346d16c2f8Saliguori 
3535ba223c29Saliguori typedef struct MapClient {
3536ba223c29Saliguori     void *opaque;
3537ba223c29Saliguori     void (*callback)(void *opaque);
353872cf2d4fSBlue Swirl     QLIST_ENTRY(MapClient) link;
3539ba223c29Saliguori } MapClient;
3540ba223c29Saliguori 
354172cf2d4fSBlue Swirl static QLIST_HEAD(map_client_list, MapClient) map_client_list
354272cf2d4fSBlue Swirl     = QLIST_HEAD_INITIALIZER(map_client_list);
3543ba223c29Saliguori 
3544ba223c29Saliguori void *cpu_register_map_client(void *opaque, void (*callback)(void *opaque))
3545ba223c29Saliguori {
35467267c094SAnthony Liguori     MapClient *client = g_malloc(sizeof(*client));
3547ba223c29Saliguori 
3548ba223c29Saliguori     client->opaque = opaque;
3549ba223c29Saliguori     client->callback = callback;
355072cf2d4fSBlue Swirl     QLIST_INSERT_HEAD(&map_client_list, client, link);
3551ba223c29Saliguori     return client;
3552ba223c29Saliguori }
3553ba223c29Saliguori 
3554ba223c29Saliguori void cpu_unregister_map_client(void *_client)
3555ba223c29Saliguori {
3556ba223c29Saliguori     MapClient *client = (MapClient *)_client;
3557ba223c29Saliguori 
355872cf2d4fSBlue Swirl     QLIST_REMOVE(client, link);
35597267c094SAnthony Liguori     g_free(client);
3560ba223c29Saliguori }
3561ba223c29Saliguori 
3562ba223c29Saliguori static void cpu_notify_map_clients(void)
3563ba223c29Saliguori {
3564ba223c29Saliguori     MapClient *client;
3565ba223c29Saliguori 
356672cf2d4fSBlue Swirl     while (!QLIST_EMPTY(&map_client_list)) {
356772cf2d4fSBlue Swirl         client = QLIST_FIRST(&map_client_list);
3568ba223c29Saliguori         client->callback(client->opaque);
356934d5e948SIsaku Yamahata         cpu_unregister_map_client(client);
3570ba223c29Saliguori     }
3571ba223c29Saliguori }
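
/*
 * Illustrative sketch, not in the original: a DMA user whose mapping failed
 * because the single bounce buffer was busy can register a callback to be
 * retried once it is freed.  "MyDMAState", "retry_dma" and "my_dma_run" are
 * hypothetical:
 *
 *     static void retry_dma(void *opaque)
 *     {
 *         MyDMAState *s = opaque;
 *         my_dma_run(s);        // calls cpu_physical_memory_map() again
 *     }
 *
 *     s->map_client = cpu_register_map_client(s, retry_dma);
 */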
3572ba223c29Saliguori 
35736d16c2f8Saliguori /* Map a physical memory region into a host virtual address.
35746d16c2f8Saliguori  * May map a subset of the requested range, given by and returned in *plen.
35756d16c2f8Saliguori  * May return NULL if resources needed to perform the mapping are exhausted.
35766d16c2f8Saliguori  * Use only for reads OR writes - not for read-modify-write operations.
3577ba223c29Saliguori  * Use cpu_register_map_client() to know when retrying the map operation is
3578ba223c29Saliguori  * likely to succeed.
35796d16c2f8Saliguori  */
3580c227f099SAnthony Liguori void *cpu_physical_memory_map(target_phys_addr_t addr,
3581c227f099SAnthony Liguori                               target_phys_addr_t *plen,
35826d16c2f8Saliguori                               int is_write)
35836d16c2f8Saliguori {
3584c227f099SAnthony Liguori     target_phys_addr_t len = *plen;
358538bee5dcSStefano Stabellini     target_phys_addr_t todo = 0;
35866d16c2f8Saliguori     int l;
3587c227f099SAnthony Liguori     target_phys_addr_t page;
3588f3705d53SAvi Kivity     MemoryRegionSection *section;
3589f15fbc4bSAnthony PERARD     ram_addr_t raddr = RAM_ADDR_MAX;
35908ab934f9SStefano Stabellini     ram_addr_t rlen;
35918ab934f9SStefano Stabellini     void *ret;
35926d16c2f8Saliguori 
35936d16c2f8Saliguori     while (len > 0) {
35946d16c2f8Saliguori         page = addr & TARGET_PAGE_MASK;
35956d16c2f8Saliguori         l = (page + TARGET_PAGE_SIZE) - addr;
35966d16c2f8Saliguori         if (l > len)
35976d16c2f8Saliguori             l = len;
359806ef3525SAvi Kivity         section = phys_page_find(page >> TARGET_PAGE_BITS);
35996d16c2f8Saliguori 
3600f3705d53SAvi Kivity         if (!(memory_region_is_ram(section->mr) && !section->readonly)) {
360138bee5dcSStefano Stabellini             if (todo || bounce.buffer) {
36026d16c2f8Saliguori                 break;
36036d16c2f8Saliguori             }
36046d16c2f8Saliguori             bounce.buffer = qemu_memalign(TARGET_PAGE_SIZE, TARGET_PAGE_SIZE);
36056d16c2f8Saliguori             bounce.addr = addr;
36066d16c2f8Saliguori             bounce.len = l;
36076d16c2f8Saliguori             if (!is_write) {
360854f7b4a3SStefan Weil                 cpu_physical_memory_read(addr, bounce.buffer, l);
36096d16c2f8Saliguori             }
361038bee5dcSStefano Stabellini 
361138bee5dcSStefano Stabellini             *plen = l;
361238bee5dcSStefano Stabellini             return bounce.buffer;
36136d16c2f8Saliguori         }
36148ab934f9SStefano Stabellini         if (!todo) {
3615f3705d53SAvi Kivity             raddr = memory_region_get_ram_addr(section->mr)
3616cc5bea60SBlue Swirl                 + memory_region_section_addr(section, addr);
36178ab934f9SStefano Stabellini         }
36186d16c2f8Saliguori 
36196d16c2f8Saliguori         len -= l;
36206d16c2f8Saliguori         addr += l;
362138bee5dcSStefano Stabellini         todo += l;
36226d16c2f8Saliguori     }
36238ab934f9SStefano Stabellini     rlen = todo;
36248ab934f9SStefano Stabellini     ret = qemu_ram_ptr_length(raddr, &rlen);
36258ab934f9SStefano Stabellini     *plen = rlen;
36268ab934f9SStefano Stabellini     return ret;
36276d16c2f8Saliguori }
36286d16c2f8Saliguori 
36296d16c2f8Saliguori /* Unmaps a memory region previously mapped by cpu_physical_memory_map().
36306d16c2f8Saliguori  * Will also mark the memory as dirty if is_write == 1.  access_len gives
36316d16c2f8Saliguori  * the amount of memory that was actually read or written by the caller.
36326d16c2f8Saliguori  */
3633c227f099SAnthony Liguori void cpu_physical_memory_unmap(void *buffer, target_phys_addr_t len,
3634c227f099SAnthony Liguori                                int is_write, target_phys_addr_t access_len)
36356d16c2f8Saliguori {
36366d16c2f8Saliguori     if (buffer != bounce.buffer) {
36376d16c2f8Saliguori         if (is_write) {
3638e890261fSMarcelo Tosatti             ram_addr_t addr1 = qemu_ram_addr_from_host_nofail(buffer);
36396d16c2f8Saliguori             while (access_len) {
36406d16c2f8Saliguori                 unsigned l;
36416d16c2f8Saliguori                 l = TARGET_PAGE_SIZE;
36426d16c2f8Saliguori                 if (l > access_len)
36436d16c2f8Saliguori                     l = access_len;
36446d16c2f8Saliguori                 if (!cpu_physical_memory_is_dirty(addr1)) {
36456d16c2f8Saliguori                     /* invalidate code */
36466d16c2f8Saliguori                     tb_invalidate_phys_page_range(addr1, addr1 + l, 0);
36476d16c2f8Saliguori                     /* set dirty bit */
3648f7c11b53SYoshiaki Tamura                     cpu_physical_memory_set_dirty_flags(
3649f7c11b53SYoshiaki Tamura                         addr1, (0xff & ~CODE_DIRTY_FLAG));
36506d16c2f8Saliguori                 }
36516d16c2f8Saliguori                 addr1 += l;
36526d16c2f8Saliguori                 access_len -= l;
36536d16c2f8Saliguori             }
36546d16c2f8Saliguori         }
3655868bb33fSJan Kiszka         if (xen_enabled()) {
3656e41d7c69SJan Kiszka             xen_invalidate_map_cache_entry(buffer);
3657050a0ddfSAnthony PERARD         }
36586d16c2f8Saliguori         return;
36596d16c2f8Saliguori     }
36606d16c2f8Saliguori     if (is_write) {
36616d16c2f8Saliguori         cpu_physical_memory_write(bounce.addr, bounce.buffer, access_len);
36626d16c2f8Saliguori     }
3663f8a83245SHerve Poussineau     qemu_vfree(bounce.buffer);
36646d16c2f8Saliguori     bounce.buffer = NULL;
3665ba223c29Saliguori     cpu_notify_map_clients();
36666d16c2f8Saliguori }
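
/*
 * Illustrative sketch, not in the original: the intended map/unmap pairing
 * for a zero-copy read of "len" bytes of guest memory.  Note that *plen may
 * come back smaller than requested:
 *
 *     target_phys_addr_t plen = len;
 *     void *buf = cpu_physical_memory_map(addr, &plen, 0);  // is_write == 0
 *     if (buf) {
 *         // ...consume plen bytes at buf...
 *         cpu_physical_memory_unmap(buf, plen, 0, plen);
 *     } else {
 *         // mapping resources exhausted; use cpu_register_map_client()
 *         // to learn when a retry is likely to succeed
 *     }
 */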
3667d0ecd2aaSbellard 
36688df1cd07Sbellard /* warning: addr must be aligned */
36691e78bcc1SAlexander Graf static inline uint32_t ldl_phys_internal(target_phys_addr_t addr,
36701e78bcc1SAlexander Graf                                          enum device_endian endian)
36718df1cd07Sbellard {
36728df1cd07Sbellard     uint8_t *ptr;
36738df1cd07Sbellard     uint32_t val;
3674f3705d53SAvi Kivity     MemoryRegionSection *section;
36758df1cd07Sbellard 
367606ef3525SAvi Kivity     section = phys_page_find(addr >> TARGET_PAGE_BITS);
36778df1cd07Sbellard 
3678cc5bea60SBlue Swirl     if (!(memory_region_is_ram(section->mr) ||
3679cc5bea60SBlue Swirl           memory_region_is_romd(section->mr))) {
36808df1cd07Sbellard         /* I/O case */
3681cc5bea60SBlue Swirl         addr = memory_region_section_addr(section, addr);
368237ec01d4SAvi Kivity         val = io_mem_read(section->mr, addr, 4);
36831e78bcc1SAlexander Graf #if defined(TARGET_WORDS_BIGENDIAN)
36841e78bcc1SAlexander Graf         if (endian == DEVICE_LITTLE_ENDIAN) {
36851e78bcc1SAlexander Graf             val = bswap32(val);
36861e78bcc1SAlexander Graf         }
36871e78bcc1SAlexander Graf #else
36881e78bcc1SAlexander Graf         if (endian == DEVICE_BIG_ENDIAN) {
36891e78bcc1SAlexander Graf             val = bswap32(val);
36901e78bcc1SAlexander Graf         }
36911e78bcc1SAlexander Graf #endif
36928df1cd07Sbellard     } else {
36938df1cd07Sbellard         /* RAM case */
3694f3705d53SAvi Kivity         ptr = qemu_get_ram_ptr((memory_region_get_ram_addr(section->mr)
369506ef3525SAvi Kivity                                 & TARGET_PAGE_MASK)
3696cc5bea60SBlue Swirl                                + memory_region_section_addr(section, addr));
36971e78bcc1SAlexander Graf         switch (endian) {
36981e78bcc1SAlexander Graf         case DEVICE_LITTLE_ENDIAN:
36991e78bcc1SAlexander Graf             val = ldl_le_p(ptr);
37001e78bcc1SAlexander Graf             break;
37011e78bcc1SAlexander Graf         case DEVICE_BIG_ENDIAN:
37021e78bcc1SAlexander Graf             val = ldl_be_p(ptr);
37031e78bcc1SAlexander Graf             break;
37041e78bcc1SAlexander Graf         default:
37058df1cd07Sbellard             val = ldl_p(ptr);
37061e78bcc1SAlexander Graf             break;
37071e78bcc1SAlexander Graf         }
37088df1cd07Sbellard     }
37098df1cd07Sbellard     return val;
37108df1cd07Sbellard }
37118df1cd07Sbellard 
37121e78bcc1SAlexander Graf uint32_t ldl_phys(target_phys_addr_t addr)
37131e78bcc1SAlexander Graf {
37141e78bcc1SAlexander Graf     return ldl_phys_internal(addr, DEVICE_NATIVE_ENDIAN);
37151e78bcc1SAlexander Graf }
37161e78bcc1SAlexander Graf 
37171e78bcc1SAlexander Graf uint32_t ldl_le_phys(target_phys_addr_t addr)
37181e78bcc1SAlexander Graf {
37191e78bcc1SAlexander Graf     return ldl_phys_internal(addr, DEVICE_LITTLE_ENDIAN);
37201e78bcc1SAlexander Graf }
37211e78bcc1SAlexander Graf 
37221e78bcc1SAlexander Graf uint32_t ldl_be_phys(target_phys_addr_t addr)
37231e78bcc1SAlexander Graf {
37241e78bcc1SAlexander Graf     return ldl_phys_internal(addr, DEVICE_BIG_ENDIAN);
37251e78bcc1SAlexander Graf }
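
/*
 * Illustrative sketch, not in the original: the _le/_be variants let device
 * models access guest structures of fixed endianness regardless of host and
 * target byte order, e.g. a little-endian 32-bit field of a hypothetical
 * in-RAM descriptor at "desc_pa" (stl_le_phys is defined below):
 *
 *     uint32_t flags = ldl_le_phys(desc_pa + 4);
 *     stl_le_phys(desc_pa + 4, flags | 1);
 */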
37261e78bcc1SAlexander Graf 
372784b7b8e7Sbellard /* warning: addr must be aligned */
37281e78bcc1SAlexander Graf static inline uint64_t ldq_phys_internal(target_phys_addr_t addr,
37291e78bcc1SAlexander Graf                                          enum device_endian endian)
373084b7b8e7Sbellard {
373184b7b8e7Sbellard     uint8_t *ptr;
373284b7b8e7Sbellard     uint64_t val;
3733f3705d53SAvi Kivity     MemoryRegionSection *section;
373484b7b8e7Sbellard 
373506ef3525SAvi Kivity     section = phys_page_find(addr >> TARGET_PAGE_BITS);
373684b7b8e7Sbellard 
3737cc5bea60SBlue Swirl     if (!(memory_region_is_ram(section->mr) ||
3738cc5bea60SBlue Swirl           memory_region_is_romd(section->mr))) {
373984b7b8e7Sbellard         /* I/O case */
3740cc5bea60SBlue Swirl         addr = memory_region_section_addr(section, addr);
37411e78bcc1SAlexander Graf 
37421e78bcc1SAlexander Graf         /* XXX: this is broken when device endian != cpu endian.
37431e78bcc1SAlexander Graf            Fix by checking the "endian" variable, as ldl_phys_internal does */
374484b7b8e7Sbellard #ifdef TARGET_WORDS_BIGENDIAN
374537ec01d4SAvi Kivity         val = io_mem_read(section->mr, addr, 4) << 32;
374637ec01d4SAvi Kivity         val |= io_mem_read(section->mr, addr + 4, 4);
374784b7b8e7Sbellard #else
374837ec01d4SAvi Kivity         val = io_mem_read(section->mr, addr, 4);
374937ec01d4SAvi Kivity         val |= io_mem_read(section->mr, addr + 4, 4) << 32;
375084b7b8e7Sbellard #endif
375184b7b8e7Sbellard     } else {
375284b7b8e7Sbellard         /* RAM case */
3753f3705d53SAvi Kivity         ptr = qemu_get_ram_ptr((memory_region_get_ram_addr(section->mr)
375406ef3525SAvi Kivity                                 & TARGET_PAGE_MASK)
3755cc5bea60SBlue Swirl                                + memory_region_section_addr(section, addr));
37561e78bcc1SAlexander Graf         switch (endian) {
37571e78bcc1SAlexander Graf         case DEVICE_LITTLE_ENDIAN:
37581e78bcc1SAlexander Graf             val = ldq_le_p(ptr);
37591e78bcc1SAlexander Graf             break;
37601e78bcc1SAlexander Graf         case DEVICE_BIG_ENDIAN:
37611e78bcc1SAlexander Graf             val = ldq_be_p(ptr);
37621e78bcc1SAlexander Graf             break;
37631e78bcc1SAlexander Graf         default:
376484b7b8e7Sbellard             val = ldq_p(ptr);
37651e78bcc1SAlexander Graf             break;
37661e78bcc1SAlexander Graf         }
376784b7b8e7Sbellard     }
376884b7b8e7Sbellard     return val;
376984b7b8e7Sbellard }
377084b7b8e7Sbellard 
37711e78bcc1SAlexander Graf uint64_t ldq_phys(target_phys_addr_t addr)
37721e78bcc1SAlexander Graf {
37731e78bcc1SAlexander Graf     return ldq_phys_internal(addr, DEVICE_NATIVE_ENDIAN);
37741e78bcc1SAlexander Graf }
37751e78bcc1SAlexander Graf 
37761e78bcc1SAlexander Graf uint64_t ldq_le_phys(target_phys_addr_t addr)
37771e78bcc1SAlexander Graf {
37781e78bcc1SAlexander Graf     return ldq_phys_internal(addr, DEVICE_LITTLE_ENDIAN);
37791e78bcc1SAlexander Graf }
37801e78bcc1SAlexander Graf 
37811e78bcc1SAlexander Graf uint64_t ldq_be_phys(target_phys_addr_t addr)
37821e78bcc1SAlexander Graf {
37831e78bcc1SAlexander Graf     return ldq_phys_internal(addr, DEVICE_BIG_ENDIAN);
37841e78bcc1SAlexander Graf }
37851e78bcc1SAlexander Graf 
3786aab33094Sbellard /* XXX: optimize */
3787c227f099SAnthony Liguori uint32_t ldub_phys(target_phys_addr_t addr)
3788aab33094Sbellard {
3789aab33094Sbellard     uint8_t val;
3790aab33094Sbellard     cpu_physical_memory_read(addr, &val, 1);
3791aab33094Sbellard     return val;
3792aab33094Sbellard }
3793aab33094Sbellard 
3794733f0b02SMichael S. Tsirkin /* warning: addr must be aligned */
37951e78bcc1SAlexander Graf static inline uint32_t lduw_phys_internal(target_phys_addr_t addr,
37961e78bcc1SAlexander Graf                                           enum device_endian endian)
3797aab33094Sbellard {
3798733f0b02SMichael S. Tsirkin     uint8_t *ptr;
3799733f0b02SMichael S. Tsirkin     uint32_t val;
3800f3705d53SAvi Kivity     MemoryRegionSection *section;
3801733f0b02SMichael S. Tsirkin 
380206ef3525SAvi Kivity     section = phys_page_find(addr >> TARGET_PAGE_BITS);
3803733f0b02SMichael S. Tsirkin 
3804cc5bea60SBlue Swirl     if (!(memory_region_is_ram(section->mr) ||
3805cc5bea60SBlue Swirl           memory_region_is_romd(section->mr))) {
3806733f0b02SMichael S. Tsirkin         /* I/O case */
3807cc5bea60SBlue Swirl         addr = memory_region_section_addr(section, addr);
380837ec01d4SAvi Kivity         val = io_mem_read(section->mr, addr, 2);
38091e78bcc1SAlexander Graf #if defined(TARGET_WORDS_BIGENDIAN)
38101e78bcc1SAlexander Graf         if (endian == DEVICE_LITTLE_ENDIAN) {
38111e78bcc1SAlexander Graf             val = bswap16(val);
38121e78bcc1SAlexander Graf         }
38131e78bcc1SAlexander Graf #else
38141e78bcc1SAlexander Graf         if (endian == DEVICE_BIG_ENDIAN) {
38151e78bcc1SAlexander Graf             val = bswap16(val);
38161e78bcc1SAlexander Graf         }
38171e78bcc1SAlexander Graf #endif
3818733f0b02SMichael S. Tsirkin     } else {
3819733f0b02SMichael S. Tsirkin         /* RAM case */
3820f3705d53SAvi Kivity         ptr = qemu_get_ram_ptr((memory_region_get_ram_addr(section->mr)
382106ef3525SAvi Kivity                                 & TARGET_PAGE_MASK)
3822cc5bea60SBlue Swirl                                + memory_region_section_addr(section, addr));
38231e78bcc1SAlexander Graf         switch (endian) {
38241e78bcc1SAlexander Graf         case DEVICE_LITTLE_ENDIAN:
38251e78bcc1SAlexander Graf             val = lduw_le_p(ptr);
38261e78bcc1SAlexander Graf             break;
38271e78bcc1SAlexander Graf         case DEVICE_BIG_ENDIAN:
38281e78bcc1SAlexander Graf             val = lduw_be_p(ptr);
38291e78bcc1SAlexander Graf             break;
38301e78bcc1SAlexander Graf         default:
3831733f0b02SMichael S. Tsirkin             val = lduw_p(ptr);
38321e78bcc1SAlexander Graf             break;
38331e78bcc1SAlexander Graf         }
3834733f0b02SMichael S. Tsirkin     }
3835733f0b02SMichael S. Tsirkin     return val;
3836aab33094Sbellard }
3837aab33094Sbellard 
38381e78bcc1SAlexander Graf uint32_t lduw_phys(target_phys_addr_t addr)
38391e78bcc1SAlexander Graf {
38401e78bcc1SAlexander Graf     return lduw_phys_internal(addr, DEVICE_NATIVE_ENDIAN);
38411e78bcc1SAlexander Graf }
38421e78bcc1SAlexander Graf 
38431e78bcc1SAlexander Graf uint32_t lduw_le_phys(target_phys_addr_t addr)
38441e78bcc1SAlexander Graf {
38451e78bcc1SAlexander Graf     return lduw_phys_internal(addr, DEVICE_LITTLE_ENDIAN);
38461e78bcc1SAlexander Graf }
38471e78bcc1SAlexander Graf 
38481e78bcc1SAlexander Graf uint32_t lduw_be_phys(target_phys_addr_t addr)
38491e78bcc1SAlexander Graf {
38501e78bcc1SAlexander Graf     return lduw_phys_internal(addr, DEVICE_BIG_ENDIAN);
38511e78bcc1SAlexander Graf }
38521e78bcc1SAlexander Graf 
38538df1cd07Sbellard /* warning: addr must be aligned. The ram page is not marked as dirty
38548df1cd07Sbellard    and the code inside is not invalidated. It is useful if the dirty
38558df1cd07Sbellard    bits are used to track modified PTEs */
3856c227f099SAnthony Liguori void stl_phys_notdirty(target_phys_addr_t addr, uint32_t val)
38578df1cd07Sbellard {
38588df1cd07Sbellard     uint8_t *ptr;
3859f3705d53SAvi Kivity     MemoryRegionSection *section;
38608df1cd07Sbellard 
386106ef3525SAvi Kivity     section = phys_page_find(addr >> TARGET_PAGE_BITS);
38628df1cd07Sbellard 
3863f3705d53SAvi Kivity     if (!memory_region_is_ram(section->mr) || section->readonly) {
3864cc5bea60SBlue Swirl         addr = memory_region_section_addr(section, addr);
386537ec01d4SAvi Kivity         if (memory_region_is_ram(section->mr)) {
386637ec01d4SAvi Kivity             section = &phys_sections[phys_section_rom];
386737ec01d4SAvi Kivity         }
386837ec01d4SAvi Kivity         io_mem_write(section->mr, addr, val, 4);
38698df1cd07Sbellard     } else {
3870f3705d53SAvi Kivity         unsigned long addr1 = (memory_region_get_ram_addr(section->mr)
387106ef3525SAvi Kivity                                & TARGET_PAGE_MASK)
3872cc5bea60SBlue Swirl             + memory_region_section_addr(section, addr);
38735579c7f3Spbrook         ptr = qemu_get_ram_ptr(addr1);
38748df1cd07Sbellard         stl_p(ptr, val);
387574576198Saliguori 
387674576198Saliguori         if (unlikely(in_migration)) {
387774576198Saliguori             if (!cpu_physical_memory_is_dirty(addr1)) {
387874576198Saliguori                 /* invalidate code */
387974576198Saliguori                 tb_invalidate_phys_page_range(addr1, addr1 + 4, 0);
388074576198Saliguori                 /* set dirty bit */
3881f7c11b53SYoshiaki Tamura                 cpu_physical_memory_set_dirty_flags(
3882f7c11b53SYoshiaki Tamura                     addr1, (0xff & ~CODE_DIRTY_FLAG));
388374576198Saliguori             }
388474576198Saliguori         }
38858df1cd07Sbellard     }
38868df1cd07Sbellard }
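
/*
 * Illustrative sketch, not in the original: as the comment above says, the
 * _notdirty store suits target-MMU emulation that writes accessed/dirty
 * bits back into a guest PTE without invalidating any TBs translated from
 * that page.  "pte_addr" is hypothetical and 0x20 (the x86 PTE accessed
 * bit) is used purely as an example value:
 *
 *     uint32_t pte = ldl_phys(pte_addr);
 *     stl_phys_notdirty(pte_addr, pte | 0x20);
 */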
38878df1cd07Sbellard 
3888c227f099SAnthony Liguori void stq_phys_notdirty(target_phys_addr_t addr, uint64_t val)
3889bc98a7efSj_mayer {
3890bc98a7efSj_mayer     uint8_t *ptr;
3891f3705d53SAvi Kivity     MemoryRegionSection *section;
3892bc98a7efSj_mayer 
389306ef3525SAvi Kivity     section = phys_page_find(addr >> TARGET_PAGE_BITS);
3894bc98a7efSj_mayer 
3895f3705d53SAvi Kivity     if (!memory_region_is_ram(section->mr) || section->readonly) {
3896cc5bea60SBlue Swirl         addr = memory_region_section_addr(section, addr);
389737ec01d4SAvi Kivity         if (memory_region_is_ram(section->mr)) {
389837ec01d4SAvi Kivity             section = &phys_sections[phys_section_rom];
389937ec01d4SAvi Kivity         }
3900bc98a7efSj_mayer #ifdef TARGET_WORDS_BIGENDIAN
390137ec01d4SAvi Kivity         io_mem_write(section->mr, addr, val >> 32, 4);
390237ec01d4SAvi Kivity         io_mem_write(section->mr, addr + 4, (uint32_t)val, 4);
3903bc98a7efSj_mayer #else
390437ec01d4SAvi Kivity         io_mem_write(section->mr, addr, (uint32_t)val, 4);
390537ec01d4SAvi Kivity         io_mem_write(section->mr, addr + 4, val >> 32, 4);
3906bc98a7efSj_mayer #endif
3907bc98a7efSj_mayer     } else {
3908f3705d53SAvi Kivity         ptr = qemu_get_ram_ptr((memory_region_get_ram_addr(section->mr)
390906ef3525SAvi Kivity                                 & TARGET_PAGE_MASK)
3910cc5bea60SBlue Swirl                                + memory_region_section_addr(section, addr));
3911bc98a7efSj_mayer         stq_p(ptr, val);
3912bc98a7efSj_mayer     }
3913bc98a7efSj_mayer }
3914bc98a7efSj_mayer 
39158df1cd07Sbellard /* warning: addr must be aligned */
39161e78bcc1SAlexander Graf static inline void stl_phys_internal(target_phys_addr_t addr, uint32_t val,
39171e78bcc1SAlexander Graf                                      enum device_endian endian)
39188df1cd07Sbellard {
39198df1cd07Sbellard     uint8_t *ptr;
3920f3705d53SAvi Kivity     MemoryRegionSection *section;
39218df1cd07Sbellard 
392206ef3525SAvi Kivity     section = phys_page_find(addr >> TARGET_PAGE_BITS);
39238df1cd07Sbellard 
3924f3705d53SAvi Kivity     if (!memory_region_is_ram(section->mr) || section->readonly) {
3925cc5bea60SBlue Swirl         addr = memory_region_section_addr(section, addr);
392637ec01d4SAvi Kivity         if (memory_region_is_ram(section->mr)) {
392737ec01d4SAvi Kivity             section = &phys_sections[phys_section_rom];
392837ec01d4SAvi Kivity         }
39291e78bcc1SAlexander Graf #if defined(TARGET_WORDS_BIGENDIAN)
39301e78bcc1SAlexander Graf         if (endian == DEVICE_LITTLE_ENDIAN) {
39311e78bcc1SAlexander Graf             val = bswap32(val);
39321e78bcc1SAlexander Graf         }
39331e78bcc1SAlexander Graf #else
39341e78bcc1SAlexander Graf         if (endian == DEVICE_BIG_ENDIAN) {
39351e78bcc1SAlexander Graf             val = bswap32(val);
39361e78bcc1SAlexander Graf         }
39371e78bcc1SAlexander Graf #endif
393837ec01d4SAvi Kivity         io_mem_write(section->mr, addr, val, 4);
39398df1cd07Sbellard     } else {
39408df1cd07Sbellard         unsigned long addr1;
3941f3705d53SAvi Kivity         addr1 = (memory_region_get_ram_addr(section->mr) & TARGET_PAGE_MASK)
3942cc5bea60SBlue Swirl             + memory_region_section_addr(section, addr);
39438df1cd07Sbellard         /* RAM case */
39445579c7f3Spbrook         ptr = qemu_get_ram_ptr(addr1);
39451e78bcc1SAlexander Graf         switch (endian) {
39461e78bcc1SAlexander Graf         case DEVICE_LITTLE_ENDIAN:
39471e78bcc1SAlexander Graf             stl_le_p(ptr, val);
39481e78bcc1SAlexander Graf             break;
39491e78bcc1SAlexander Graf         case DEVICE_BIG_ENDIAN:
39501e78bcc1SAlexander Graf             stl_be_p(ptr, val);
39511e78bcc1SAlexander Graf             break;
39521e78bcc1SAlexander Graf         default:
39538df1cd07Sbellard             stl_p(ptr, val);
39541e78bcc1SAlexander Graf             break;
39551e78bcc1SAlexander Graf         }
39563a7d929eSbellard         if (!cpu_physical_memory_is_dirty(addr1)) {
39578df1cd07Sbellard             /* invalidate code */
39588df1cd07Sbellard             tb_invalidate_phys_page_range(addr1, addr1 + 4, 0);
39598df1cd07Sbellard             /* set dirty bit */
3960f7c11b53SYoshiaki Tamura             cpu_physical_memory_set_dirty_flags(addr1,
3961f7c11b53SYoshiaki Tamura                 (0xff & ~CODE_DIRTY_FLAG));
39628df1cd07Sbellard         }
39638df1cd07Sbellard     }
39643a7d929eSbellard }
39658df1cd07Sbellard 
39661e78bcc1SAlexander Graf void stl_phys(target_phys_addr_t addr, uint32_t val)
39671e78bcc1SAlexander Graf {
39681e78bcc1SAlexander Graf     stl_phys_internal(addr, val, DEVICE_NATIVE_ENDIAN);
39691e78bcc1SAlexander Graf }
39701e78bcc1SAlexander Graf 
39711e78bcc1SAlexander Graf void stl_le_phys(target_phys_addr_t addr, uint32_t val)
39721e78bcc1SAlexander Graf {
39731e78bcc1SAlexander Graf     stl_phys_internal(addr, val, DEVICE_LITTLE_ENDIAN);
39741e78bcc1SAlexander Graf }
39751e78bcc1SAlexander Graf 
39761e78bcc1SAlexander Graf void stl_be_phys(target_phys_addr_t addr, uint32_t val)
39771e78bcc1SAlexander Graf {
39781e78bcc1SAlexander Graf     stl_phys_internal(addr, val, DEVICE_BIG_ENDIAN);
39791e78bcc1SAlexander Graf }
39801e78bcc1SAlexander Graf 
3981aab33094Sbellard /* XXX: optimize */
3982c227f099SAnthony Liguori void stb_phys(target_phys_addr_t addr, uint32_t val)
3983aab33094Sbellard {
3984aab33094Sbellard     uint8_t v = val;
3985aab33094Sbellard     cpu_physical_memory_write(addr, &v, 1);
3986aab33094Sbellard }
3987aab33094Sbellard 
3988733f0b02SMichael S. Tsirkin /* warning: addr must be aligned */
39891e78bcc1SAlexander Graf static inline void stw_phys_internal(target_phys_addr_t addr, uint32_t val,
39901e78bcc1SAlexander Graf                                      enum device_endian endian)
3991aab33094Sbellard {
3992733f0b02SMichael S. Tsirkin     uint8_t *ptr;
3993f3705d53SAvi Kivity     MemoryRegionSection *section;
3994733f0b02SMichael S. Tsirkin 
399506ef3525SAvi Kivity     section = phys_page_find(addr >> TARGET_PAGE_BITS);
3996733f0b02SMichael S. Tsirkin 
3997f3705d53SAvi Kivity     if (!memory_region_is_ram(section->mr) || section->readonly) {
3998cc5bea60SBlue Swirl         addr = memory_region_section_addr(section, addr);
399937ec01d4SAvi Kivity         if (memory_region_is_ram(section->mr)) {
400037ec01d4SAvi Kivity             section = &phys_sections[phys_section_rom];
400137ec01d4SAvi Kivity         }
40021e78bcc1SAlexander Graf #if defined(TARGET_WORDS_BIGENDIAN)
40031e78bcc1SAlexander Graf         if (endian == DEVICE_LITTLE_ENDIAN) {
40041e78bcc1SAlexander Graf             val = bswap16(val);
40051e78bcc1SAlexander Graf         }
40061e78bcc1SAlexander Graf #else
40071e78bcc1SAlexander Graf         if (endian == DEVICE_BIG_ENDIAN) {
40081e78bcc1SAlexander Graf             val = bswap16(val);
40091e78bcc1SAlexander Graf         }
40101e78bcc1SAlexander Graf #endif
401137ec01d4SAvi Kivity         io_mem_write(section->mr, addr, val, 2);
4012733f0b02SMichael S. Tsirkin     } else {
4013733f0b02SMichael S. Tsirkin         unsigned long addr1;
4014f3705d53SAvi Kivity         addr1 = (memory_region_get_ram_addr(section->mr) & TARGET_PAGE_MASK)
4015cc5bea60SBlue Swirl             + memory_region_section_addr(section, addr);
4016733f0b02SMichael S. Tsirkin         /* RAM case */
4017733f0b02SMichael S. Tsirkin         ptr = qemu_get_ram_ptr(addr1);
40181e78bcc1SAlexander Graf         switch (endian) {
40191e78bcc1SAlexander Graf         case DEVICE_LITTLE_ENDIAN:
40201e78bcc1SAlexander Graf             stw_le_p(ptr, val);
40211e78bcc1SAlexander Graf             break;
40221e78bcc1SAlexander Graf         case DEVICE_BIG_ENDIAN:
40231e78bcc1SAlexander Graf             stw_be_p(ptr, val);
40241e78bcc1SAlexander Graf             break;
40251e78bcc1SAlexander Graf         default:
4026733f0b02SMichael S. Tsirkin             stw_p(ptr, val);
40271e78bcc1SAlexander Graf             break;
40281e78bcc1SAlexander Graf         }
4029733f0b02SMichael S. Tsirkin         if (!cpu_physical_memory_is_dirty(addr1)) {
4030733f0b02SMichael S. Tsirkin             /* invalidate code */
4031733f0b02SMichael S. Tsirkin             tb_invalidate_phys_page_range(addr1, addr1 + 2, 0);
4032733f0b02SMichael S. Tsirkin             /* set dirty bit */
4033733f0b02SMichael S. Tsirkin             cpu_physical_memory_set_dirty_flags(addr1,
4034733f0b02SMichael S. Tsirkin                 (0xff & ~CODE_DIRTY_FLAG));
4035733f0b02SMichael S. Tsirkin         }
4036733f0b02SMichael S. Tsirkin     }
4037aab33094Sbellard }
4038aab33094Sbellard 
40391e78bcc1SAlexander Graf void stw_phys(target_phys_addr_t addr, uint32_t val)
40401e78bcc1SAlexander Graf {
40411e78bcc1SAlexander Graf     stw_phys_internal(addr, val, DEVICE_NATIVE_ENDIAN);
40421e78bcc1SAlexander Graf }
40431e78bcc1SAlexander Graf 
40441e78bcc1SAlexander Graf void stw_le_phys(target_phys_addr_t addr, uint32_t val)
40451e78bcc1SAlexander Graf {
40461e78bcc1SAlexander Graf     stw_phys_internal(addr, val, DEVICE_LITTLE_ENDIAN);
40471e78bcc1SAlexander Graf }
40481e78bcc1SAlexander Graf 
40491e78bcc1SAlexander Graf void stw_be_phys(target_phys_addr_t addr, uint32_t val)
40501e78bcc1SAlexander Graf {
40511e78bcc1SAlexander Graf     stw_phys_internal(addr, val, DEVICE_BIG_ENDIAN);
40521e78bcc1SAlexander Graf }
40531e78bcc1SAlexander Graf 
4054aab33094Sbellard /* XXX: optimize */
4055c227f099SAnthony Liguori void stq_phys(target_phys_addr_t addr, uint64_t val)
4056aab33094Sbellard {
4057aab33094Sbellard     val = tswap64(val);
405871d2b725SStefan Weil     cpu_physical_memory_write(addr, &val, 8);
4059aab33094Sbellard }
4060aab33094Sbellard 
40611e78bcc1SAlexander Graf void stq_le_phys(target_phys_addr_t addr, uint64_t val)
40621e78bcc1SAlexander Graf {
40631e78bcc1SAlexander Graf     val = cpu_to_le64(val);
40641e78bcc1SAlexander Graf     cpu_physical_memory_write(addr, &val, 8);
40651e78bcc1SAlexander Graf }
40661e78bcc1SAlexander Graf 
40671e78bcc1SAlexander Graf void stq_be_phys(target_phys_addr_t addr, uint64_t val)
40681e78bcc1SAlexander Graf {
40691e78bcc1SAlexander Graf     val = cpu_to_be64(val);
40701e78bcc1SAlexander Graf     cpu_physical_memory_write(addr, &val, 8);
40711e78bcc1SAlexander Graf }
40721e78bcc1SAlexander Graf 
40735e2972fdSaliguori /* virtual memory access for debug (includes writing to ROM) */
40749349b4f9SAndreas Färber int cpu_memory_rw_debug(CPUArchState *env, target_ulong addr,
4075b448f2f3Sbellard                         uint8_t *buf, int len, int is_write)
407613eb76e0Sbellard {
407713eb76e0Sbellard     int l;
4078c227f099SAnthony Liguori     target_phys_addr_t phys_addr;
40799b3c35e0Sj_mayer     target_ulong page;
408013eb76e0Sbellard 
408113eb76e0Sbellard     while (len > 0) {
408213eb76e0Sbellard         page = addr & TARGET_PAGE_MASK;
408313eb76e0Sbellard         phys_addr = cpu_get_phys_page_debug(env, page);
408413eb76e0Sbellard         /* if no physical page mapped, return an error */
408513eb76e0Sbellard         if (phys_addr == -1)
408613eb76e0Sbellard             return -1;
408713eb76e0Sbellard         l = (page + TARGET_PAGE_SIZE) - addr;
408813eb76e0Sbellard         if (l > len)
408913eb76e0Sbellard             l = len;
40905e2972fdSaliguori         phys_addr += (addr & ~TARGET_PAGE_MASK);
40915e2972fdSaliguori         if (is_write)
40925e2972fdSaliguori             cpu_physical_memory_write_rom(phys_addr, buf, l);
40935e2972fdSaliguori         else
40945e2972fdSaliguori             cpu_physical_memory_rw(phys_addr, buf, l, is_write);
409513eb76e0Sbellard         len -= l;
409613eb76e0Sbellard         buf += l;
409713eb76e0Sbellard         addr += l;
409813eb76e0Sbellard     }
409913eb76e0Sbellard     return 0;
410013eb76e0Sbellard }
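
/*
 * Illustrative sketch, not in the original: this is the accessor used by
 * debug clients such as the gdbstub, since it walks the guest MMU and
 * tolerates ROM.  Reading 4 bytes of guest code at a virtual address, with
 * "env" and "pc" hypothetical:
 *
 *     uint8_t insn[4];
 *     if (cpu_memory_rw_debug(env, pc, insn, sizeof(insn), 0) < 0) {
 *         // no physical page mapped at pc
 *     }
 */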
4101a68fe89cSPaul Brook #endif
410213eb76e0Sbellard 
41032e70f6efSpbrook /* in deterministic execution (icount) mode, instructions doing device
41042e70f6efSpbrook    I/Os must be at the end of the TB */
410520503968SBlue Swirl void cpu_io_recompile(CPUArchState *env, uintptr_t retaddr)
41062e70f6efSpbrook {
41072e70f6efSpbrook     TranslationBlock *tb;
41082e70f6efSpbrook     uint32_t n, cflags;
41092e70f6efSpbrook     target_ulong pc, cs_base;
41102e70f6efSpbrook     uint64_t flags;
41112e70f6efSpbrook 
411220503968SBlue Swirl     tb = tb_find_pc(retaddr);
41132e70f6efSpbrook     if (!tb) {
41142e70f6efSpbrook         cpu_abort(env, "cpu_io_recompile: could not find TB for pc=%p",
411520503968SBlue Swirl                   (void *)retaddr);
41162e70f6efSpbrook     }
41172e70f6efSpbrook     n = env->icount_decr.u16.low + tb->icount;
411820503968SBlue Swirl     cpu_restore_state(tb, env, retaddr);
41192e70f6efSpbrook     /* Calculate how many instructions had been executed before the fault
4120bf20dc07Sths        occurred.  */
41212e70f6efSpbrook     n = n - env->icount_decr.u16.low;
41222e70f6efSpbrook     /* Generate a new TB ending on the I/O insn.  */
41232e70f6efSpbrook     n++;
41242e70f6efSpbrook     /* On MIPS and SH, delay slot instructions can only be restarted if
41252e70f6efSpbrook        they were already the first instruction in the TB.  If this is not
4126bf20dc07Sths        the first instruction in a TB then re-execute the preceding
41272e70f6efSpbrook        branch.  */
41282e70f6efSpbrook #if defined(TARGET_MIPS)
41292e70f6efSpbrook     if ((env->hflags & MIPS_HFLAG_BMASK) != 0 && n > 1) {
41302e70f6efSpbrook         env->active_tc.PC -= 4;
41312e70f6efSpbrook         env->icount_decr.u16.low++;
41322e70f6efSpbrook         env->hflags &= ~MIPS_HFLAG_BMASK;
41332e70f6efSpbrook     }
41342e70f6efSpbrook #elif defined(TARGET_SH4)
41352e70f6efSpbrook     if ((env->flags & ((DELAY_SLOT | DELAY_SLOT_CONDITIONAL))) != 0
41362e70f6efSpbrook             && n > 1) {
41372e70f6efSpbrook         env->pc -= 2;
41382e70f6efSpbrook         env->icount_decr.u16.low++;
41392e70f6efSpbrook         env->flags &= ~(DELAY_SLOT | DELAY_SLOT_CONDITIONAL);
41402e70f6efSpbrook     }
41412e70f6efSpbrook #endif
41422e70f6efSpbrook     /* This should never happen.  */
41432e70f6efSpbrook     if (n > CF_COUNT_MASK)
41442e70f6efSpbrook         cpu_abort(env, "TB too big during recompile");
41452e70f6efSpbrook 
41462e70f6efSpbrook     cflags = n | CF_LAST_IO;
41472e70f6efSpbrook     pc = tb->pc;
41482e70f6efSpbrook     cs_base = tb->cs_base;
41492e70f6efSpbrook     flags = tb->flags;
41502e70f6efSpbrook     tb_phys_invalidate(tb, -1);
41512e70f6efSpbrook     /* FIXME: In theory this could raise an exception.  In practice
41522e70f6efSpbrook        we have already translated the block once so it's probably ok.  */
41532e70f6efSpbrook     tb_gen_code(env, pc, cs_base, flags, cflags);
4154bf20dc07Sths     /* TODO: If env->pc != tb->pc (i.e. the faulting instruction was not
41552e70f6efSpbrook        the first in the TB) then we end up generating a whole new TB and
41562e70f6efSpbrook        repeating the fault, which is horribly inefficient.
41572e70f6efSpbrook        Better would be to execute just this insn uncached, or generate a
41582e70f6efSpbrook        second new TB.  */
41592e70f6efSpbrook     cpu_resume_from_signal(env, NULL);
41602e70f6efSpbrook }
41612e70f6efSpbrook 
4162b3755a91SPaul Brook #if !defined(CONFIG_USER_ONLY)
4163b3755a91SPaul Brook 
4164055403b2SStefan Weil void dump_exec_info(FILE *f, fprintf_function cpu_fprintf)
4165e3db7226Sbellard {
4166e3db7226Sbellard     int i, target_code_size, max_target_code_size;
4167e3db7226Sbellard     int direct_jmp_count, direct_jmp2_count, cross_page;
4168e3db7226Sbellard     TranslationBlock *tb;
4169e3db7226Sbellard 
4170e3db7226Sbellard     target_code_size = 0;
4171e3db7226Sbellard     max_target_code_size = 0;
4172e3db7226Sbellard     cross_page = 0;
4173e3db7226Sbellard     direct_jmp_count = 0;
4174e3db7226Sbellard     direct_jmp2_count = 0;
4175e3db7226Sbellard     for (i = 0; i < nb_tbs; i++) {
4176e3db7226Sbellard         tb = &tbs[i];
4177e3db7226Sbellard         target_code_size += tb->size;
4178e3db7226Sbellard         if (tb->size > max_target_code_size)
4179e3db7226Sbellard             max_target_code_size = tb->size;
4180e3db7226Sbellard         if (tb->page_addr[1] != -1)
4181e3db7226Sbellard             cross_page++;
4182e3db7226Sbellard         if (tb->tb_next_offset[0] != 0xffff) {
4183e3db7226Sbellard             direct_jmp_count++;
4184e3db7226Sbellard             if (tb->tb_next_offset[1] != 0xffff) {
4185e3db7226Sbellard                 direct_jmp2_count++;
4186e3db7226Sbellard             }
4187e3db7226Sbellard         }
4188e3db7226Sbellard     }
4189e3db7226Sbellard     /* XXX: avoid using doubles? */
419057fec1feSbellard     cpu_fprintf(f, "Translation buffer state:\n");
4191055403b2SStefan Weil     cpu_fprintf(f, "gen code size       %td/%ld\n",
419226a5f13bSbellard                 code_gen_ptr - code_gen_buffer, code_gen_buffer_max_size);
419326a5f13bSbellard     cpu_fprintf(f, "TB count            %d/%d\n",
419426a5f13bSbellard                 nb_tbs, code_gen_max_blocks);
4195e3db7226Sbellard     cpu_fprintf(f, "TB avg target size  %d max=%d bytes\n",
4196e3db7226Sbellard                 nb_tbs ? target_code_size / nb_tbs : 0,
4197e3db7226Sbellard                 max_target_code_size);
4198055403b2SStefan Weil     cpu_fprintf(f, "TB avg host size    %td bytes (expansion ratio: %0.1f)\n",
4199e3db7226Sbellard                 nb_tbs ? (code_gen_ptr - code_gen_buffer) / nb_tbs : 0,
4200e3db7226Sbellard                 target_code_size ? (double) (code_gen_ptr - code_gen_buffer) / target_code_size : 0);
4201e3db7226Sbellard     cpu_fprintf(f, "cross page TB count %d (%d%%)\n",
4202e3db7226Sbellard             cross_page,
4203e3db7226Sbellard             nb_tbs ? (cross_page * 100) / nb_tbs : 0);
4204e3db7226Sbellard     cpu_fprintf(f, "direct jump count   %d (%d%%) (2 jumps=%d %d%%)\n",
4205e3db7226Sbellard                 direct_jmp_count,
4206e3db7226Sbellard                 nb_tbs ? (direct_jmp_count * 100) / nb_tbs : 0,
4207e3db7226Sbellard                 direct_jmp2_count,
4208e3db7226Sbellard                 nb_tbs ? (direct_jmp2_count * 100) / nb_tbs : 0);
420957fec1feSbellard     cpu_fprintf(f, "\nStatistics:\n");
4210e3db7226Sbellard     cpu_fprintf(f, "TB flush count      %d\n", tb_flush_count);
4211e3db7226Sbellard     cpu_fprintf(f, "TB invalidate count %d\n", tb_phys_invalidate_count);
4212e3db7226Sbellard     cpu_fprintf(f, "TLB flush count     %d\n", tlb_flush_count);
4213b67d9a52Sbellard     tcg_dump_info(f, cpu_fprintf);
4214e3db7226Sbellard }
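
/*
 * Illustrative sketch, not in the original: the dump goes through a
 * caller-supplied fprintf-like function, so plain stdio works directly
 * (fprintf is compatible with fprintf_function, as at other QEMU call
 * sites of this era):
 *
 *     dump_exec_info(stderr, fprintf);
 */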
4215e3db7226Sbellard 
421682afa586SBenjamin Herrenschmidt /*
421782afa586SBenjamin Herrenschmidt  * A helper function for the _utterly broken_ virtio device model to find out if
421882afa586SBenjamin Herrenschmidt  * it's running on a big-endian machine. Don't do this at home, kids!
421982afa586SBenjamin Herrenschmidt  */
422082afa586SBenjamin Herrenschmidt bool virtio_is_big_endian(void);
422182afa586SBenjamin Herrenschmidt bool virtio_is_big_endian(void)
422282afa586SBenjamin Herrenschmidt {
422382afa586SBenjamin Herrenschmidt #if defined(TARGET_WORDS_BIGENDIAN)
422482afa586SBenjamin Herrenschmidt     return true;
422582afa586SBenjamin Herrenschmidt #else
422682afa586SBenjamin Herrenschmidt     return false;
422782afa586SBenjamin Herrenschmidt #endif
422882afa586SBenjamin Herrenschmidt }
422982afa586SBenjamin Herrenschmidt 
423061382a50Sbellard #endif
423176f35538SWen Congyang 
423276f35538SWen Congyang #ifndef CONFIG_USER_ONLY
423376f35538SWen Congyang bool cpu_physical_memory_is_io(target_phys_addr_t phys_addr)
423476f35538SWen Congyang {
423576f35538SWen Congyang     MemoryRegionSection *section;
423676f35538SWen Congyang 
423776f35538SWen Congyang     section = phys_page_find(phys_addr >> TARGET_PAGE_BITS);
423876f35538SWen Congyang 
423976f35538SWen Congyang     return !(memory_region_is_ram(section->mr) ||
424076f35538SWen Congyang              memory_region_is_romd(section->mr));
424176f35538SWen Congyang }
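
/*
 * Illustrative sketch, not in the original: memory-dump code can use this
 * predicate to skip device regions while walking guest-physical pages;
 * "start" and "end" are hypothetical bounds:
 *
 *     target_phys_addr_t a;
 *     for (a = start; a < end; a += TARGET_PAGE_SIZE) {
 *         if (cpu_physical_memory_is_io(a)) {
 *             continue;   // don't read device registers
 *         }
 *         // ...dump the page at a...
 *     }
 */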
424276f35538SWen Congyang #endif
4243