xref: /qemu/system/physmem.c (revision fce537d4a741521ac182d54465c568b101b9a71e)
154936004Sbellard /*
2fd6ce8f6Sbellard  *  virtual page mapping and translated block handling
354936004Sbellard  *
454936004Sbellard  *  Copyright (c) 2003 Fabrice Bellard
554936004Sbellard  *
654936004Sbellard  * This library is free software; you can redistribute it and/or
754936004Sbellard  * modify it under the terms of the GNU Lesser General Public
854936004Sbellard  * License as published by the Free Software Foundation; either
954936004Sbellard  * version 2 of the License, or (at your option) any later version.
1054936004Sbellard  *
1154936004Sbellard  * This library is distributed in the hope that it will be useful,
1254936004Sbellard  * but WITHOUT ANY WARRANTY; without even the implied warranty of
1354936004Sbellard  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
1454936004Sbellard  * Lesser General Public License for more details.
1554936004Sbellard  *
1654936004Sbellard  * You should have received a copy of the GNU Lesser General Public
178167ee88SBlue Swirl  * License along with this library; if not, see <http://www.gnu.org/licenses/>.
1854936004Sbellard  */
1967b915a5Sbellard #include "config.h"
20d5a8f07cSbellard #ifdef _WIN32
21d5a8f07cSbellard #include <windows.h>
22d5a8f07cSbellard #else
23a98d49b1Sbellard #include <sys/types.h>
24d5a8f07cSbellard #include <sys/mman.h>
25d5a8f07cSbellard #endif
2654936004Sbellard 
27055403b2SStefan Weil #include "qemu-common.h"
286180a181Sbellard #include "cpu.h"
29b67d9a52Sbellard #include "tcg.h"
30b3c7724cSpbrook #include "hw/hw.h"
31cc9e98cbSAlex Williamson #include "hw/qdev.h"
3274576198Saliguori #include "osdep.h"
337ba1e619Saliguori #include "kvm.h"
34432d268cSJun Nakajima #include "hw/xen.h"
3529e922b6SBlue Swirl #include "qemu-timer.h"
3662152b8aSAvi Kivity #include "memory.h"
3762152b8aSAvi Kivity #include "exec-memory.h"
3853a5960aSpbrook #if defined(CONFIG_USER_ONLY)
3953a5960aSpbrook #include <qemu.h>
40f01576f1SJuergen Lock #if defined(__FreeBSD__) || defined(__FreeBSD_kernel__)
41f01576f1SJuergen Lock #include <sys/param.h>
42f01576f1SJuergen Lock #if __FreeBSD_version >= 700104
43f01576f1SJuergen Lock #define HAVE_KINFO_GETVMMAP
44f01576f1SJuergen Lock #define sigqueue sigqueue_freebsd  /* avoid redefinition */
45f01576f1SJuergen Lock #include <sys/time.h>
46f01576f1SJuergen Lock #include <sys/proc.h>
47f01576f1SJuergen Lock #include <machine/profile.h>
48f01576f1SJuergen Lock #define _KERNEL
49f01576f1SJuergen Lock #include <sys/user.h>
50f01576f1SJuergen Lock #undef _KERNEL
51f01576f1SJuergen Lock #undef sigqueue
52f01576f1SJuergen Lock #include <libutil.h>
53f01576f1SJuergen Lock #endif
54f01576f1SJuergen Lock #endif
55432d268cSJun Nakajima #else /* !CONFIG_USER_ONLY */
56432d268cSJun Nakajima #include "xen-mapcache.h"
576506e4f9SStefano Stabellini #include "trace.h"
5853a5960aSpbrook #endif
5954936004Sbellard 
60fd6ce8f6Sbellard //#define DEBUG_TB_INVALIDATE
6166e85a21Sbellard //#define DEBUG_FLUSH
629fa3e853Sbellard //#define DEBUG_TLB
6367d3b957Spbrook //#define DEBUG_UNASSIGNED
64fd6ce8f6Sbellard 
65fd6ce8f6Sbellard /* make various TB consistency checks */
66fd6ce8f6Sbellard //#define DEBUG_TB_CHECK
6798857888Sbellard //#define DEBUG_TLB_CHECK
68fd6ce8f6Sbellard 
691196be37Sths //#define DEBUG_IOPORT
70db7b5426Sblueswir1 //#define DEBUG_SUBPAGE
711196be37Sths 
7299773bd4Spbrook #if !defined(CONFIG_USER_ONLY)
7399773bd4Spbrook /* TB consistency checks only implemented for usermode emulation.  */
7499773bd4Spbrook #undef DEBUG_TB_CHECK
7599773bd4Spbrook #endif
7699773bd4Spbrook 
779fa3e853Sbellard #define SMC_BITMAP_USE_THRESHOLD 10
789fa3e853Sbellard 
79bdaf78e0Sblueswir1 static TranslationBlock *tbs;
8024ab68acSStefan Weil static int code_gen_max_blocks;
819fa3e853Sbellard TranslationBlock *tb_phys_hash[CODE_GEN_PHYS_HASH_SIZE];
82bdaf78e0Sblueswir1 static int nb_tbs;
83eb51d102Sbellard /* any access to the tbs or the page table must use this lock */
84c227f099SAnthony Liguori spinlock_t tb_lock = SPIN_LOCK_UNLOCKED;
85fd6ce8f6Sbellard 
86141ac468Sblueswir1 #if defined(__arm__) || defined(__sparc_v9__)
87141ac468Sblueswir1 /* The prologue must be reachable with a direct jump. ARM and Sparc64
88141ac468Sblueswir1  have limited branch ranges (possibly also PPC), so place it in a
89d03d860bSblueswir1  section close to the code segment. */
90d03d860bSblueswir1 #define code_gen_section                                \
91d03d860bSblueswir1     __attribute__((__section__(".gen_code")))           \
92d03d860bSblueswir1     __attribute__((aligned (32)))
93f8e2af11SStefan Weil #elif defined(_WIN32)
94f8e2af11SStefan Weil /* Maximum alignment for Win32 is 16. */
95f8e2af11SStefan Weil #define code_gen_section                                \
96f8e2af11SStefan Weil     __attribute__((aligned (16)))
97d03d860bSblueswir1 #else
98d03d860bSblueswir1 #define code_gen_section                                \
99d03d860bSblueswir1     __attribute__((aligned (32)))
100d03d860bSblueswir1 #endif
101d03d860bSblueswir1 
102d03d860bSblueswir1 uint8_t code_gen_prologue[1024] code_gen_section;
103bdaf78e0Sblueswir1 static uint8_t *code_gen_buffer;
104bdaf78e0Sblueswir1 static unsigned long code_gen_buffer_size;
10526a5f13bSbellard /* threshold to flush the translated code buffer */
106bdaf78e0Sblueswir1 static unsigned long code_gen_buffer_max_size;
10724ab68acSStefan Weil static uint8_t *code_gen_ptr;
108fd6ce8f6Sbellard 
109e2eef170Spbrook #if !defined(CONFIG_USER_ONLY)
1109fa3e853Sbellard int phys_ram_fd;
11174576198Saliguori static int in_migration;
11294a6b54fSpbrook 
11385d59fefSPaolo Bonzini RAMList ram_list = { .blocks = QLIST_HEAD_INITIALIZER(ram_list.blocks) };
11462152b8aSAvi Kivity 
11562152b8aSAvi Kivity static MemoryRegion *system_memory;
116309cb471SAvi Kivity static MemoryRegion *system_io;
11762152b8aSAvi Kivity 
118e2eef170Spbrook #endif
1199fa3e853Sbellard 
1206a00d601Sbellard CPUState *first_cpu;
1216a00d601Sbellard /* current CPU in the current thread. It is only valid inside
1226a00d601Sbellard    cpu_exec() */
123b3c4bbe5SPaolo Bonzini DEFINE_TLS(CPUState *,cpu_single_env);
1242e70f6efSpbrook /* 0 = Do not count executed instructions.
125bf20dc07Sths    1 = Precise instruction counting.
1262e70f6efSpbrook    2 = Adaptive rate instruction counting.  */
1272e70f6efSpbrook int use_icount = 0;
1286a00d601Sbellard 
12954936004Sbellard typedef struct PageDesc {
13092e873b9Sbellard     /* list of TBs intersecting this ram page */
131fd6ce8f6Sbellard     TranslationBlock *first_tb;
1329fa3e853Sbellard     /* in order to optimize self-modifying code, we count the write
1339fa3e853Sbellard        accesses to a given page; past a threshold a code bitmap is built */
1349fa3e853Sbellard     unsigned int code_write_count;
1359fa3e853Sbellard     uint8_t *code_bitmap;
1369fa3e853Sbellard #if defined(CONFIG_USER_ONLY)
1379fa3e853Sbellard     unsigned long flags;
1389fa3e853Sbellard #endif
13954936004Sbellard } PageDesc;
14054936004Sbellard 
14141c1b1c9SPaul Brook /* In system mode we want L1_MAP to be based on ram offsets,
1425cd2c5b6SRichard Henderson    while in user mode we want it to be based on virtual addresses.  */
1435cd2c5b6SRichard Henderson #if !defined(CONFIG_USER_ONLY)
14441c1b1c9SPaul Brook #if HOST_LONG_BITS < TARGET_PHYS_ADDR_SPACE_BITS
14541c1b1c9SPaul Brook # define L1_MAP_ADDR_SPACE_BITS  HOST_LONG_BITS
14641c1b1c9SPaul Brook #else
1475cd2c5b6SRichard Henderson # define L1_MAP_ADDR_SPACE_BITS  TARGET_PHYS_ADDR_SPACE_BITS
14841c1b1c9SPaul Brook #endif
149bedb69eaSj_mayer #else
1505cd2c5b6SRichard Henderson # define L1_MAP_ADDR_SPACE_BITS  TARGET_VIRT_ADDR_SPACE_BITS
151bedb69eaSj_mayer #endif
15254936004Sbellard 
1535cd2c5b6SRichard Henderson /* Size of the L2 (and L3, etc) page tables.  */
1545cd2c5b6SRichard Henderson #define L2_BITS 10
15554936004Sbellard #define L2_SIZE (1 << L2_BITS)
15654936004Sbellard 
1575cd2c5b6SRichard Henderson /* The bits remaining after N lower levels of page tables.  */
1585cd2c5b6SRichard Henderson #define P_L1_BITS_REM \
1595cd2c5b6SRichard Henderson     ((TARGET_PHYS_ADDR_SPACE_BITS - TARGET_PAGE_BITS) % L2_BITS)
1605cd2c5b6SRichard Henderson #define V_L1_BITS_REM \
1615cd2c5b6SRichard Henderson     ((L1_MAP_ADDR_SPACE_BITS - TARGET_PAGE_BITS) % L2_BITS)
1625cd2c5b6SRichard Henderson 
1635cd2c5b6SRichard Henderson /* Size of the L1 page table.  Avoid silly small sizes.  */
1645cd2c5b6SRichard Henderson #if P_L1_BITS_REM < 4
1655cd2c5b6SRichard Henderson #define P_L1_BITS  (P_L1_BITS_REM + L2_BITS)
1665cd2c5b6SRichard Henderson #else
1675cd2c5b6SRichard Henderson #define P_L1_BITS  P_L1_BITS_REM
1685cd2c5b6SRichard Henderson #endif
1695cd2c5b6SRichard Henderson 
1705cd2c5b6SRichard Henderson #if V_L1_BITS_REM < 4
1715cd2c5b6SRichard Henderson #define V_L1_BITS  (V_L1_BITS_REM + L2_BITS)
1725cd2c5b6SRichard Henderson #else
1735cd2c5b6SRichard Henderson #define V_L1_BITS  V_L1_BITS_REM
1745cd2c5b6SRichard Henderson #endif
1755cd2c5b6SRichard Henderson 
1765cd2c5b6SRichard Henderson #define P_L1_SIZE  ((target_phys_addr_t)1 << P_L1_BITS)
1775cd2c5b6SRichard Henderson #define V_L1_SIZE  ((target_ulong)1 << V_L1_BITS)
1785cd2c5b6SRichard Henderson 
1795cd2c5b6SRichard Henderson #define P_L1_SHIFT (TARGET_PHYS_ADDR_SPACE_BITS - TARGET_PAGE_BITS - P_L1_BITS)
1805cd2c5b6SRichard Henderson #define V_L1_SHIFT (L1_MAP_ADDR_SPACE_BITS - TARGET_PAGE_BITS - V_L1_BITS)
1815cd2c5b6SRichard Henderson 
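/* Worked example (assuming TARGET_PAGE_BITS == 12 and a 32-bit guest
   address space, i.e. L1_MAP_ADDR_SPACE_BITS == 32):
     V_L1_BITS_REM = (32 - 12) % 10 = 0  ->  V_L1_BITS = 0 + 10 = 10
     V_L1_SIZE     = 1 << 10 = 1024 entries in l1_map
     V_L1_SHIFT    = 32 - 12 - 10 = 10
   so a page index splits into a 10-bit L1 index and a 10-bit leaf
   index, exactly the levels that page_find_alloc() walks below.  */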
18283fb7adfSbellard unsigned long qemu_real_host_page_size;
18383fb7adfSbellard unsigned long qemu_host_page_size;
18483fb7adfSbellard unsigned long qemu_host_page_mask;
18554936004Sbellard 
1865cd2c5b6SRichard Henderson /* This is a multi-level map on the virtual address space.
1875cd2c5b6SRichard Henderson    The bottom level has pointers to PageDesc.  */
1885cd2c5b6SRichard Henderson static void *l1_map[V_L1_SIZE];
18954936004Sbellard 
190e2eef170Spbrook #if !defined(CONFIG_USER_ONLY)
19141c1b1c9SPaul Brook typedef struct PhysPageDesc {
19241c1b1c9SPaul Brook     /* offset in host memory of the page + io_index in the low bits */
19341c1b1c9SPaul Brook     ram_addr_t phys_offset;
19441c1b1c9SPaul Brook     ram_addr_t region_offset;
19541c1b1c9SPaul Brook } PhysPageDesc;
19641c1b1c9SPaul Brook 
1975cd2c5b6SRichard Henderson /* This is a multi-level map on the physical address space.
1985cd2c5b6SRichard Henderson    The bottom level has pointers to PhysPageDesc.  */
1995cd2c5b6SRichard Henderson static void *l1_phys_map[P_L1_SIZE];
2006d9a1304SPaul Brook 
201e2eef170Spbrook static void io_mem_init(void);
20262152b8aSAvi Kivity static void memory_map_init(void);
203e2eef170Spbrook 
20433417e70Sbellard /* io memory support */
20533417e70Sbellard CPUWriteMemoryFunc *io_mem_write[IO_MEM_NB_ENTRIES][4];
20633417e70Sbellard CPUReadMemoryFunc *io_mem_read[IO_MEM_NB_ENTRIES][4];
207a4193c8aSbellard void *io_mem_opaque[IO_MEM_NB_ENTRIES];
208511d2b14Sblueswir1 static char io_mem_used[IO_MEM_NB_ENTRIES];
2096658ffb8Spbrook static int io_mem_watch;
2106658ffb8Spbrook #endif
21133417e70Sbellard 
21234865134Sbellard /* log support */
2131e8b27caSJuha Riihimäki #ifdef WIN32
2141e8b27caSJuha Riihimäki static const char *logfilename = "qemu.log";
2151e8b27caSJuha Riihimäki #else
216d9b630fdSblueswir1 static const char *logfilename = "/tmp/qemu.log";
2171e8b27caSJuha Riihimäki #endif
21834865134Sbellard FILE *logfile;
21934865134Sbellard int loglevel;
220e735b91cSpbrook static int log_append = 0;
22134865134Sbellard 
222e3db7226Sbellard /* statistics */
223b3755a91SPaul Brook #if !defined(CONFIG_USER_ONLY)
224e3db7226Sbellard static int tlb_flush_count;
225b3755a91SPaul Brook #endif
226e3db7226Sbellard static int tb_flush_count;
227e3db7226Sbellard static int tb_phys_invalidate_count;
228e3db7226Sbellard 
2297cb69caeSbellard #ifdef _WIN32
2307cb69caeSbellard static void map_exec(void *addr, long size)
2317cb69caeSbellard {
2327cb69caeSbellard     DWORD old_protect;
2337cb69caeSbellard     VirtualProtect(addr, size,
2347cb69caeSbellard                    PAGE_EXECUTE_READWRITE, &old_protect);
2357cb69caeSbellard 
2367cb69caeSbellard }
2377cb69caeSbellard #else
2387cb69caeSbellard static void map_exec(void *addr, long size)
2397cb69caeSbellard {
2404369415fSbellard     unsigned long start, end, page_size;
2417cb69caeSbellard 
2424369415fSbellard     page_size = getpagesize();
2437cb69caeSbellard     start = (unsigned long)addr;
2444369415fSbellard     start &= ~(page_size - 1);
2457cb69caeSbellard 
2467cb69caeSbellard     end = (unsigned long)addr + size;
2474369415fSbellard     end += page_size - 1;
2484369415fSbellard     end &= ~(page_size - 1);
2497cb69caeSbellard 
2507cb69caeSbellard     mprotect((void *)start, end - start,
2517cb69caeSbellard              PROT_READ | PROT_WRITE | PROT_EXEC);
2527cb69caeSbellard }
2537cb69caeSbellard #endif
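/* For example, with a 4096-byte host page, map_exec((void *)0x1234, 0x100)
   rounds start down to 0x1000 and end up to 0x2000, so the entire page
   range containing the buffer becomes PROT_READ|PROT_WRITE|PROT_EXEC.  */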
2547cb69caeSbellard 
255b346ff46Sbellard static void page_init(void)
25654936004Sbellard {
25783fb7adfSbellard     /* NOTE: we can always suppose that qemu_host_page_size >=
25854936004Sbellard        TARGET_PAGE_SIZE */
259c2b48b69Saliguori #ifdef _WIN32
260c2b48b69Saliguori     {
261c2b48b69Saliguori         SYSTEM_INFO system_info;
262c2b48b69Saliguori 
263c2b48b69Saliguori         GetSystemInfo(&system_info);
264c2b48b69Saliguori         qemu_real_host_page_size = system_info.dwPageSize;
265c2b48b69Saliguori     }
266c2b48b69Saliguori #else
267c2b48b69Saliguori     qemu_real_host_page_size = getpagesize();
268c2b48b69Saliguori #endif
26983fb7adfSbellard     if (qemu_host_page_size == 0)
27083fb7adfSbellard         qemu_host_page_size = qemu_real_host_page_size;
27183fb7adfSbellard     if (qemu_host_page_size < TARGET_PAGE_SIZE)
27283fb7adfSbellard         qemu_host_page_size = TARGET_PAGE_SIZE;
27383fb7adfSbellard     qemu_host_page_mask = ~(qemu_host_page_size - 1);
27450a9569bSbalrog 
2752e9a5713SPaul Brook #if defined(CONFIG_BSD) && defined(CONFIG_USER_ONLY)
27650a9569bSbalrog     {
277f01576f1SJuergen Lock #ifdef HAVE_KINFO_GETVMMAP
278f01576f1SJuergen Lock         struct kinfo_vmentry *freep;
279f01576f1SJuergen Lock         int i, cnt;
280f01576f1SJuergen Lock 
281f01576f1SJuergen Lock         freep = kinfo_getvmmap(getpid(), &cnt);
282f01576f1SJuergen Lock         if (freep) {
283f01576f1SJuergen Lock             mmap_lock();
284f01576f1SJuergen Lock             for (i = 0; i < cnt; i++) {
285f01576f1SJuergen Lock                 unsigned long startaddr, endaddr;
286f01576f1SJuergen Lock 
287f01576f1SJuergen Lock                 startaddr = freep[i].kve_start;
288f01576f1SJuergen Lock                 endaddr = freep[i].kve_end;
289f01576f1SJuergen Lock                 if (h2g_valid(startaddr)) {
290f01576f1SJuergen Lock                     startaddr = h2g(startaddr) & TARGET_PAGE_MASK;
291f01576f1SJuergen Lock 
292f01576f1SJuergen Lock                     if (h2g_valid(endaddr)) {
293f01576f1SJuergen Lock                         endaddr = h2g(endaddr);
294fd436907SAurelien Jarno                         page_set_flags(startaddr, endaddr, PAGE_RESERVED);
295f01576f1SJuergen Lock                     } else {
296f01576f1SJuergen Lock #if TARGET_ABI_BITS <= L1_MAP_ADDR_SPACE_BITS
297f01576f1SJuergen Lock                         endaddr = ~0ul;
298fd436907SAurelien Jarno                         page_set_flags(startaddr, endaddr, PAGE_RESERVED);
299f01576f1SJuergen Lock #endif
300f01576f1SJuergen Lock                     }
301f01576f1SJuergen Lock                 }
302f01576f1SJuergen Lock             }
303f01576f1SJuergen Lock             free(freep);
304f01576f1SJuergen Lock             mmap_unlock();
305f01576f1SJuergen Lock         }
306f01576f1SJuergen Lock #else
30750a9569bSbalrog         FILE *f;
30850a9569bSbalrog 
3090776590dSpbrook         last_brk = (unsigned long)sbrk(0);
3105cd2c5b6SRichard Henderson 
311fd436907SAurelien Jarno         f = fopen("/compat/linux/proc/self/maps", "r");
31250a9569bSbalrog         if (f) {
3135cd2c5b6SRichard Henderson             mmap_lock();
3145cd2c5b6SRichard Henderson 
31550a9569bSbalrog             do {
3165cd2c5b6SRichard Henderson                 unsigned long startaddr, endaddr;
3175cd2c5b6SRichard Henderson                 int n;
3185cd2c5b6SRichard Henderson 
3195cd2c5b6SRichard Henderson                 n = fscanf (f, "%lx-%lx %*[^\n]\n", &startaddr, &endaddr);
3205cd2c5b6SRichard Henderson 
3215cd2c5b6SRichard Henderson                 if (n == 2 && h2g_valid(startaddr)) {
3225cd2c5b6SRichard Henderson                     startaddr = h2g(startaddr) & TARGET_PAGE_MASK;
3235cd2c5b6SRichard Henderson 
3245cd2c5b6SRichard Henderson                     if (h2g_valid(endaddr)) {
3255cd2c5b6SRichard Henderson                         endaddr = h2g(endaddr);
3265cd2c5b6SRichard Henderson                     } else {
3275cd2c5b6SRichard Henderson                         endaddr = ~0ul;
3285cd2c5b6SRichard Henderson                     }
3295cd2c5b6SRichard Henderson                     page_set_flags(startaddr, endaddr, PAGE_RESERVED);
33050a9569bSbalrog                 }
33150a9569bSbalrog             } while (!feof(f));
3325cd2c5b6SRichard Henderson 
33350a9569bSbalrog             fclose(f);
334c8a706feSpbrook             mmap_unlock();
33550a9569bSbalrog         }
336f01576f1SJuergen Lock #endif
3375cd2c5b6SRichard Henderson     }
33850a9569bSbalrog #endif
33954936004Sbellard }
34054936004Sbellard 
34141c1b1c9SPaul Brook static PageDesc *page_find_alloc(tb_page_addr_t index, int alloc)
34254936004Sbellard {
34341c1b1c9SPaul Brook     PageDesc *pd;
34441c1b1c9SPaul Brook     void **lp;
34541c1b1c9SPaul Brook     int i;
34641c1b1c9SPaul Brook 
34717e2377aSpbrook #if defined(CONFIG_USER_ONLY)
3487267c094SAnthony Liguori     /* We can't use g_malloc because it may recurse into a locked mutex. */
3495cd2c5b6SRichard Henderson # define ALLOC(P, SIZE)                                 \
3505cd2c5b6SRichard Henderson     do {                                                \
3515cd2c5b6SRichard Henderson         P = mmap(NULL, SIZE, PROT_READ | PROT_WRITE,    \
3525cd2c5b6SRichard Henderson                  MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);   \
3535cd2c5b6SRichard Henderson     } while (0)
3545cd2c5b6SRichard Henderson #else
3555cd2c5b6SRichard Henderson # define ALLOC(P, SIZE) \
3567267c094SAnthony Liguori     do { P = g_malloc0(SIZE); } while (0)
3575cd2c5b6SRichard Henderson #endif
3585cd2c5b6SRichard Henderson 
3595cd2c5b6SRichard Henderson     /* Level 1.  Always allocated.  */
3605cd2c5b6SRichard Henderson     lp = l1_map + ((index >> V_L1_SHIFT) & (V_L1_SIZE - 1));
3615cd2c5b6SRichard Henderson 
3625cd2c5b6SRichard Henderson     /* Level 2..N-1.  */
3635cd2c5b6SRichard Henderson     for (i = V_L1_SHIFT / L2_BITS - 1; i > 0; i--) {
3645cd2c5b6SRichard Henderson         void **p = *lp;
3655cd2c5b6SRichard Henderson 
3665cd2c5b6SRichard Henderson         if (p == NULL) {
3675cd2c5b6SRichard Henderson             if (!alloc) {
3685cd2c5b6SRichard Henderson                 return NULL;
3695cd2c5b6SRichard Henderson             }
3705cd2c5b6SRichard Henderson             ALLOC(p, sizeof(void *) * L2_SIZE);
37154936004Sbellard             *lp = p;
3725cd2c5b6SRichard Henderson         }
3735cd2c5b6SRichard Henderson 
3745cd2c5b6SRichard Henderson         lp = p + ((index >> (i * L2_BITS)) & (L2_SIZE - 1));
3755cd2c5b6SRichard Henderson     }
3765cd2c5b6SRichard Henderson 
3775cd2c5b6SRichard Henderson     pd = *lp;
3785cd2c5b6SRichard Henderson     if (pd == NULL) {
3795cd2c5b6SRichard Henderson         if (!alloc) {
3805cd2c5b6SRichard Henderson             return NULL;
3815cd2c5b6SRichard Henderson         }
3825cd2c5b6SRichard Henderson         ALLOC(pd, sizeof(PageDesc) * L2_SIZE);
3835cd2c5b6SRichard Henderson         *lp = pd;
3845cd2c5b6SRichard Henderson     }
3855cd2c5b6SRichard Henderson 
3865cd2c5b6SRichard Henderson #undef ALLOC
3875cd2c5b6SRichard Henderson 
3885cd2c5b6SRichard Henderson     return pd + (index & (L2_SIZE - 1));
38954936004Sbellard }
39054936004Sbellard 
39141c1b1c9SPaul Brook static inline PageDesc *page_find(tb_page_addr_t index)
39254936004Sbellard {
3935cd2c5b6SRichard Henderson     return page_find_alloc(index, 0);
39454936004Sbellard }
39554936004Sbellard 
3966d9a1304SPaul Brook #if !defined(CONFIG_USER_ONLY)
397c227f099SAnthony Liguori static PhysPageDesc *phys_page_find_alloc(target_phys_addr_t index, int alloc)
39892e873b9Sbellard {
399e3f4e2a4Spbrook     PhysPageDesc *pd;
4005cd2c5b6SRichard Henderson     void **lp;
401e3f4e2a4Spbrook     int i;
4025cd2c5b6SRichard Henderson 
4035cd2c5b6SRichard Henderson     /* Level 1.  Always allocated.  */
4045cd2c5b6SRichard Henderson     lp = l1_phys_map + ((index >> P_L1_SHIFT) & (P_L1_SIZE - 1));
4055cd2c5b6SRichard Henderson 
4065cd2c5b6SRichard Henderson     /* Level 2..N-1.  */
4075cd2c5b6SRichard Henderson     for (i = P_L1_SHIFT / L2_BITS - 1; i > 0; i--) {
4085cd2c5b6SRichard Henderson         void **p = *lp;
4095cd2c5b6SRichard Henderson         if (p == NULL) {
4105cd2c5b6SRichard Henderson             if (!alloc) {
411108c49b8Sbellard                 return NULL;
4125cd2c5b6SRichard Henderson             }
4137267c094SAnthony Liguori             *lp = p = g_malloc0(sizeof(void *) * L2_SIZE);
4145cd2c5b6SRichard Henderson         }
4155cd2c5b6SRichard Henderson         lp = p + ((index >> (i * L2_BITS)) & (L2_SIZE - 1));
4165cd2c5b6SRichard Henderson     }
4175cd2c5b6SRichard Henderson 
4185cd2c5b6SRichard Henderson     pd = *lp;
4195cd2c5b6SRichard Henderson     if (pd == NULL) {
4205cd2c5b6SRichard Henderson         int i;
4215ab97b7fSAlex Rozenman         int first_index = index & ~(L2_SIZE - 1);
4225cd2c5b6SRichard Henderson 
4235cd2c5b6SRichard Henderson         if (!alloc) {
4245cd2c5b6SRichard Henderson             return NULL;
4255cd2c5b6SRichard Henderson         }
4265cd2c5b6SRichard Henderson 
4277267c094SAnthony Liguori         *lp = pd = g_malloc(sizeof(PhysPageDesc) * L2_SIZE);
4285cd2c5b6SRichard Henderson 
42967c4d23cSpbrook         for (i = 0; i < L2_SIZE; i++) {
430e3f4e2a4Spbrook             pd[i].phys_offset = IO_MEM_UNASSIGNED;
4315ab97b7fSAlex Rozenman             pd[i].region_offset = (first_index + i) << TARGET_PAGE_BITS;
43267c4d23cSpbrook         }
43392e873b9Sbellard     }
4345cd2c5b6SRichard Henderson 
4355cd2c5b6SRichard Henderson     return pd + (index & (L2_SIZE - 1));
43692e873b9Sbellard }
43792e873b9Sbellard 
438c227f099SAnthony Liguori static inline PhysPageDesc *phys_page_find(target_phys_addr_t index)
43992e873b9Sbellard {
440108c49b8Sbellard     return phys_page_find_alloc(index, 0);
44192e873b9Sbellard }
44292e873b9Sbellard 
443c227f099SAnthony Liguori static void tlb_protect_code(ram_addr_t ram_addr);
444c227f099SAnthony Liguori static void tlb_unprotect_code_phys(CPUState *env, ram_addr_t ram_addr,
4453a7d929eSbellard                                     target_ulong vaddr);
446c8a706feSpbrook #define mmap_lock() do { } while(0)
447c8a706feSpbrook #define mmap_unlock() do { } while(0)
4489fa3e853Sbellard #endif
449fd6ce8f6Sbellard 
4504369415fSbellard #define DEFAULT_CODE_GEN_BUFFER_SIZE (32 * 1024 * 1024)
4514369415fSbellard 
4524369415fSbellard #if defined(CONFIG_USER_ONLY)
453ccbb4d44SStuart Brady /* Currently it is not recommended to allocate big chunks of data in
4544369415fSbellard    user mode. This will change when a dedicated libc is used */
4554369415fSbellard #define USE_STATIC_CODE_GEN_BUFFER
4564369415fSbellard #endif
4574369415fSbellard 
4584369415fSbellard #ifdef USE_STATIC_CODE_GEN_BUFFER
459ebf50fb3SAurelien Jarno static uint8_t static_code_gen_buffer[DEFAULT_CODE_GEN_BUFFER_SIZE]
460ebf50fb3SAurelien Jarno                __attribute__((aligned (CODE_GEN_ALIGN)));
4614369415fSbellard #endif
4624369415fSbellard 
4638fcd3692Sblueswir1 static void code_gen_alloc(unsigned long tb_size)
46426a5f13bSbellard {
4654369415fSbellard #ifdef USE_STATIC_CODE_GEN_BUFFER
4664369415fSbellard     code_gen_buffer = static_code_gen_buffer;
4674369415fSbellard     code_gen_buffer_size = DEFAULT_CODE_GEN_BUFFER_SIZE;
4684369415fSbellard     map_exec(code_gen_buffer, code_gen_buffer_size);
4694369415fSbellard #else
47026a5f13bSbellard     code_gen_buffer_size = tb_size;
47126a5f13bSbellard     if (code_gen_buffer_size == 0) {
4724369415fSbellard #if defined(CONFIG_USER_ONLY)
4734369415fSbellard         code_gen_buffer_size = DEFAULT_CODE_GEN_BUFFER_SIZE;
4744369415fSbellard #else
475ccbb4d44SStuart Brady         /* XXX: needs adjustments */
47694a6b54fSpbrook         code_gen_buffer_size = (unsigned long)(ram_size / 4);
4774369415fSbellard #endif
47826a5f13bSbellard     }
47926a5f13bSbellard     if (code_gen_buffer_size < MIN_CODE_GEN_BUFFER_SIZE)
48026a5f13bSbellard         code_gen_buffer_size = MIN_CODE_GEN_BUFFER_SIZE;
48126a5f13bSbellard     /* The code gen buffer location may have constraints depending on
48226a5f13bSbellard        the host cpu and OS */
48326a5f13bSbellard #if defined(__linux__)
48426a5f13bSbellard     {
48526a5f13bSbellard         int flags;
486141ac468Sblueswir1         void *start = NULL;
487141ac468Sblueswir1 
48826a5f13bSbellard         flags = MAP_PRIVATE | MAP_ANONYMOUS;
48926a5f13bSbellard #if defined(__x86_64__)
49026a5f13bSbellard         flags |= MAP_32BIT;
49126a5f13bSbellard         /* Cannot map more than that */
49226a5f13bSbellard         if (code_gen_buffer_size > (800 * 1024 * 1024))
49326a5f13bSbellard             code_gen_buffer_size = (800 * 1024 * 1024);
494141ac468Sblueswir1 #elif defined(__sparc_v9__)
495141ac468Sblueswir1         // Map the buffer below 2G, so we can use direct calls and branches
496141ac468Sblueswir1         flags |= MAP_FIXED;
497141ac468Sblueswir1         start = (void *) 0x60000000UL;
498141ac468Sblueswir1         if (code_gen_buffer_size > (512 * 1024 * 1024))
499141ac468Sblueswir1             code_gen_buffer_size = (512 * 1024 * 1024);
5001cb0661eSbalrog #elif defined(__arm__)
501222f23f5SDr. David Alan Gilbert         /* Keep the buffer no bigger than 16GB to branch between blocks */
5021cb0661eSbalrog         if (code_gen_buffer_size > 16 * 1024 * 1024)
5031cb0661eSbalrog             code_gen_buffer_size = 16 * 1024 * 1024;
504eba0b893SRichard Henderson #elif defined(__s390x__)
505eba0b893SRichard Henderson         /* Map the buffer so that we can use direct calls and branches.  */
506eba0b893SRichard Henderson         /* We have a +- 4GB range on the branches; leave some slop.  */
507eba0b893SRichard Henderson         if (code_gen_buffer_size > (3ul * 1024 * 1024 * 1024)) {
508eba0b893SRichard Henderson             code_gen_buffer_size = 3ul * 1024 * 1024 * 1024;
509eba0b893SRichard Henderson         }
510eba0b893SRichard Henderson         start = (void *)0x90000000UL;
51126a5f13bSbellard #endif
512141ac468Sblueswir1         code_gen_buffer = mmap(start, code_gen_buffer_size,
51326a5f13bSbellard                                PROT_WRITE | PROT_READ | PROT_EXEC,
51426a5f13bSbellard                                flags, -1, 0);
51526a5f13bSbellard         if (code_gen_buffer == MAP_FAILED) {
51626a5f13bSbellard             fprintf(stderr, "Could not allocate dynamic translator buffer\n");
51726a5f13bSbellard             exit(1);
51826a5f13bSbellard         }
51926a5f13bSbellard     }
520cbb608a5SBrad #elif defined(__FreeBSD__) || defined(__FreeBSD_kernel__) \
5219f4b09a4STobias Nygren     || defined(__DragonFly__) || defined(__OpenBSD__) \
5229f4b09a4STobias Nygren     || defined(__NetBSD__)
52306e67a82Saliguori     {
52406e67a82Saliguori         int flags;
52506e67a82Saliguori         void *addr = NULL;
52606e67a82Saliguori         flags = MAP_PRIVATE | MAP_ANONYMOUS;
52706e67a82Saliguori #if defined(__x86_64__)
52806e67a82Saliguori         /* FreeBSD doesn't have MAP_32BIT, use MAP_FIXED and assume
52906e67a82Saliguori          * 0x40000000 is free */
53006e67a82Saliguori         flags |= MAP_FIXED;
53106e67a82Saliguori         addr = (void *)0x40000000;
53206e67a82Saliguori         /* Cannot map more than that */
53306e67a82Saliguori         if (code_gen_buffer_size > (800 * 1024 * 1024))
53406e67a82Saliguori             code_gen_buffer_size = (800 * 1024 * 1024);
5354cd31ad2SBlue Swirl #elif defined(__sparc_v9__)
5364cd31ad2SBlue Swirl         // Map the buffer below 2G, so we can use direct calls and branches
5374cd31ad2SBlue Swirl         flags |= MAP_FIXED;
5384cd31ad2SBlue Swirl         addr = (void *) 0x60000000UL;
5394cd31ad2SBlue Swirl         if (code_gen_buffer_size > (512 * 1024 * 1024)) {
5404cd31ad2SBlue Swirl             code_gen_buffer_size = (512 * 1024 * 1024);
5414cd31ad2SBlue Swirl         }
54206e67a82Saliguori #endif
54306e67a82Saliguori         code_gen_buffer = mmap(addr, code_gen_buffer_size,
54406e67a82Saliguori                                PROT_WRITE | PROT_READ | PROT_EXEC,
54506e67a82Saliguori                                flags, -1, 0);
54606e67a82Saliguori         if (code_gen_buffer == MAP_FAILED) {
54706e67a82Saliguori             fprintf(stderr, "Could not allocate dynamic translator buffer\n");
54806e67a82Saliguori             exit(1);
54906e67a82Saliguori         }
55006e67a82Saliguori     }
55126a5f13bSbellard #else
5527267c094SAnthony Liguori     code_gen_buffer = g_malloc(code_gen_buffer_size);
55326a5f13bSbellard     map_exec(code_gen_buffer, code_gen_buffer_size);
55426a5f13bSbellard #endif
5554369415fSbellard #endif /* !USE_STATIC_CODE_GEN_BUFFER */
55626a5f13bSbellard     map_exec(code_gen_prologue, sizeof(code_gen_prologue));
55726a5f13bSbellard     code_gen_buffer_max_size = code_gen_buffer_size -
558a884da8aSPeter Maydell         (TCG_MAX_OP_SIZE * OPC_BUF_SIZE);
55926a5f13bSbellard     code_gen_max_blocks = code_gen_buffer_size / CODE_GEN_AVG_BLOCK_SIZE;
5607267c094SAnthony Liguori     tbs = g_malloc(code_gen_max_blocks * sizeof(TranslationBlock));
56126a5f13bSbellard }
56226a5f13bSbellard 
56326a5f13bSbellard /* Must be called before using the QEMU cpus. 'tb_size' is the size
56426a5f13bSbellard    (in bytes) allocated to the translation buffer. Zero means default
56526a5f13bSbellard    size. */
566d5ab9713SJan Kiszka void tcg_exec_init(unsigned long tb_size)
56726a5f13bSbellard {
56826a5f13bSbellard     cpu_gen_init();
56926a5f13bSbellard     code_gen_alloc(tb_size);
57026a5f13bSbellard     code_gen_ptr = code_gen_buffer;
5714369415fSbellard     page_init();
5729002ec79SRichard Henderson #if !defined(CONFIG_USER_ONLY) || !defined(CONFIG_USE_GUEST_BASE)
5739002ec79SRichard Henderson     /* There's no guest base to take into account, so go ahead and
5749002ec79SRichard Henderson        initialize the prologue now.  */
5759002ec79SRichard Henderson     tcg_prologue_init(&tcg_ctx);
5769002ec79SRichard Henderson #endif
57726a5f13bSbellard }
57826a5f13bSbellard 
579d5ab9713SJan Kiszka bool tcg_enabled(void)
580d5ab9713SJan Kiszka {
581d5ab9713SJan Kiszka     return code_gen_buffer != NULL;
582d5ab9713SJan Kiszka }
583d5ab9713SJan Kiszka 
584d5ab9713SJan Kiszka void cpu_exec_init_all(void)
585d5ab9713SJan Kiszka {
586d5ab9713SJan Kiszka #if !defined(CONFIG_USER_ONLY)
587d5ab9713SJan Kiszka     memory_map_init();
588d5ab9713SJan Kiszka     io_mem_init();
589d5ab9713SJan Kiszka #endif
590d5ab9713SJan Kiszka }
591d5ab9713SJan Kiszka 
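/* Typical start-up order (a sketch; the real call sites live in the
   per-target main/vl start-up code): cpu_exec_init_all() first, then
   tcg_exec_init(0) to allocate the default-sized translation buffer,
   after which tcg_enabled() returns true.  */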
5929656f324Spbrook #if defined(CPU_SAVE_VERSION) && !defined(CONFIG_USER_ONLY)
5939656f324Spbrook 
594e59fb374SJuan Quintela static int cpu_common_post_load(void *opaque, int version_id)
595e7f4eff7SJuan Quintela {
596e7f4eff7SJuan Quintela     CPUState *env = opaque;
597e7f4eff7SJuan Quintela 
5983098dba0Saurel32     /* 0x01 was CPU_INTERRUPT_EXIT. This line can be removed when the
5993098dba0Saurel32        version_id is increased. */
6003098dba0Saurel32     env->interrupt_request &= ~0x01;
6019656f324Spbrook     tlb_flush(env, 1);
6029656f324Spbrook 
6039656f324Spbrook     return 0;
6049656f324Spbrook }
605e7f4eff7SJuan Quintela 
606e7f4eff7SJuan Quintela static const VMStateDescription vmstate_cpu_common = {
607e7f4eff7SJuan Quintela     .name = "cpu_common",
608e7f4eff7SJuan Quintela     .version_id = 1,
609e7f4eff7SJuan Quintela     .minimum_version_id = 1,
610e7f4eff7SJuan Quintela     .minimum_version_id_old = 1,
611e7f4eff7SJuan Quintela     .post_load = cpu_common_post_load,
612e7f4eff7SJuan Quintela     .fields      = (VMStateField []) {
613e7f4eff7SJuan Quintela         VMSTATE_UINT32(halted, CPUState),
614e7f4eff7SJuan Quintela         VMSTATE_UINT32(interrupt_request, CPUState),
615e7f4eff7SJuan Quintela         VMSTATE_END_OF_LIST()
616e7f4eff7SJuan Quintela     }
617e7f4eff7SJuan Quintela };
6189656f324Spbrook #endif
6199656f324Spbrook 
620950f1472SGlauber Costa CPUState *qemu_get_cpu(int cpu)
621950f1472SGlauber Costa {
622950f1472SGlauber Costa     CPUState *env = first_cpu;
623950f1472SGlauber Costa 
624950f1472SGlauber Costa     while (env) {
625950f1472SGlauber Costa         if (env->cpu_index == cpu)
626950f1472SGlauber Costa             break;
627950f1472SGlauber Costa         env = env->next_cpu;
628950f1472SGlauber Costa     }
629950f1472SGlauber Costa 
630950f1472SGlauber Costa     return env;
631950f1472SGlauber Costa }
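/* For example, qemu_get_cpu(0) returns the first CPU registered by
   cpu_exec_init() (cpu_index 0), or NULL if no CPU has that index.  */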
632950f1472SGlauber Costa 
6336a00d601Sbellard void cpu_exec_init(CPUState *env)
634fd6ce8f6Sbellard {
6356a00d601Sbellard     CPUState **penv;
6366a00d601Sbellard     int cpu_index;
6376a00d601Sbellard 
638c2764719Spbrook #if defined(CONFIG_USER_ONLY)
639c2764719Spbrook     cpu_list_lock();
640c2764719Spbrook #endif
6416a00d601Sbellard     env->next_cpu = NULL;
6426a00d601Sbellard     penv = &first_cpu;
6436a00d601Sbellard     cpu_index = 0;
6446a00d601Sbellard     while (*penv != NULL) {
6451e9fa730SNathan Froyd         penv = &(*penv)->next_cpu;
6466a00d601Sbellard         cpu_index++;
6476a00d601Sbellard     }
6486a00d601Sbellard     env->cpu_index = cpu_index;
649268a362cSaliguori     env->numa_node = 0;
65072cf2d4fSBlue Swirl     QTAILQ_INIT(&env->breakpoints);
65172cf2d4fSBlue Swirl     QTAILQ_INIT(&env->watchpoints);
652dc7a09cfSJan Kiszka #ifndef CONFIG_USER_ONLY
653dc7a09cfSJan Kiszka     env->thread_id = qemu_get_thread_id();
654dc7a09cfSJan Kiszka #endif
6556a00d601Sbellard     *penv = env;
656c2764719Spbrook #if defined(CONFIG_USER_ONLY)
657c2764719Spbrook     cpu_list_unlock();
658c2764719Spbrook #endif
659b3c7724cSpbrook #if defined(CPU_SAVE_VERSION) && !defined(CONFIG_USER_ONLY)
6600be71e32SAlex Williamson     vmstate_register(NULL, cpu_index, &vmstate_cpu_common, env);
6610be71e32SAlex Williamson     register_savevm(NULL, "cpu", cpu_index, CPU_SAVE_VERSION,
662b3c7724cSpbrook                     cpu_save, cpu_load, env);
663b3c7724cSpbrook #endif
664fd6ce8f6Sbellard }
665fd6ce8f6Sbellard 
666d1a1eb74STristan Gingold /* Allocate a new translation block, or return NULL when there are too
667d1a1eb74STristan Gingold    many translation blocks or too much generated code; the caller must then flush. */
668d1a1eb74STristan Gingold static TranslationBlock *tb_alloc(target_ulong pc)
669d1a1eb74STristan Gingold {
670d1a1eb74STristan Gingold     TranslationBlock *tb;
671d1a1eb74STristan Gingold 
672d1a1eb74STristan Gingold     if (nb_tbs >= code_gen_max_blocks ||
673d1a1eb74STristan Gingold         (code_gen_ptr - code_gen_buffer) >= code_gen_buffer_max_size)
674d1a1eb74STristan Gingold         return NULL;
675d1a1eb74STristan Gingold     tb = &tbs[nb_tbs++];
676d1a1eb74STristan Gingold     tb->pc = pc;
677d1a1eb74STristan Gingold     tb->cflags = 0;
678d1a1eb74STristan Gingold     return tb;
679d1a1eb74STristan Gingold }
680d1a1eb74STristan Gingold 
681d1a1eb74STristan Gingold void tb_free(TranslationBlock *tb)
682d1a1eb74STristan Gingold {
683d1a1eb74STristan Gingold     /* In practice this is mostly used for single use temporary TB
684d1a1eb74STristan Gingold     /* In practice this is mostly used for single-use temporary TBs.
685d1a1eb74STristan Gingold        be the last one generated.  */
686d1a1eb74STristan Gingold     if (nb_tbs > 0 && tb == &tbs[nb_tbs - 1]) {
687d1a1eb74STristan Gingold         code_gen_ptr = tb->tc_ptr;
688d1a1eb74STristan Gingold         nb_tbs--;
689d1a1eb74STristan Gingold     }
690d1a1eb74STristan Gingold }
691d1a1eb74STristan Gingold 
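/* Sketch of the intended pairing, modelled on the single-use TB path:
       tb = tb_gen_code(env, pc, cs_base, flags, cflags);
       ...execute tb once...
       tb_phys_invalidate(tb, -1);
       tb_free(tb);
   The rollback only happens while tb is still the most recently
   generated block; otherwise its space is reclaimed at the next
   tb_flush().  */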
6929fa3e853Sbellard static inline void invalidate_page_bitmap(PageDesc *p)
6939fa3e853Sbellard {
6949fa3e853Sbellard     if (p->code_bitmap) {
6957267c094SAnthony Liguori         g_free(p->code_bitmap);
6969fa3e853Sbellard         p->code_bitmap = NULL;
6979fa3e853Sbellard     }
6989fa3e853Sbellard     p->code_write_count = 0;
6999fa3e853Sbellard }
7009fa3e853Sbellard 
7015cd2c5b6SRichard Henderson /* Set to NULL all the 'first_tb' fields in all PageDescs. */
7025cd2c5b6SRichard Henderson 
7035cd2c5b6SRichard Henderson static void page_flush_tb_1 (int level, void **lp)
7045cd2c5b6SRichard Henderson {
7055cd2c5b6SRichard Henderson     int i;
7065cd2c5b6SRichard Henderson 
7075cd2c5b6SRichard Henderson     if (*lp == NULL) {
7085cd2c5b6SRichard Henderson         return;
7095cd2c5b6SRichard Henderson     }
7105cd2c5b6SRichard Henderson     if (level == 0) {
7115cd2c5b6SRichard Henderson         PageDesc *pd = *lp;
7127296abacSPaul Brook         for (i = 0; i < L2_SIZE; ++i) {
7135cd2c5b6SRichard Henderson             pd[i].first_tb = NULL;
7145cd2c5b6SRichard Henderson             invalidate_page_bitmap(pd + i);
7155cd2c5b6SRichard Henderson         }
7165cd2c5b6SRichard Henderson     } else {
7175cd2c5b6SRichard Henderson         void **pp = *lp;
7187296abacSPaul Brook         for (i = 0; i < L2_SIZE; ++i) {
7195cd2c5b6SRichard Henderson             page_flush_tb_1 (level - 1, pp + i);
7205cd2c5b6SRichard Henderson         }
7215cd2c5b6SRichard Henderson     }
7225cd2c5b6SRichard Henderson }
7235cd2c5b6SRichard Henderson 
724fd6ce8f6Sbellard static void page_flush_tb(void)
725fd6ce8f6Sbellard {
7265cd2c5b6SRichard Henderson     int i;
7275cd2c5b6SRichard Henderson     for (i = 0; i < V_L1_SIZE; i++) {
7285cd2c5b6SRichard Henderson         page_flush_tb_1(V_L1_SHIFT / L2_BITS - 1, l1_map + i);
729fd6ce8f6Sbellard     }
730fd6ce8f6Sbellard }
731fd6ce8f6Sbellard 
732fd6ce8f6Sbellard /* flush all the translation blocks */
733d4e8164fSbellard /* XXX: tb_flush is currently not thread safe */
7346a00d601Sbellard void tb_flush(CPUState *env1)
735fd6ce8f6Sbellard {
7366a00d601Sbellard     CPUState *env;
7370124311eSbellard #if defined(DEBUG_FLUSH)
738ab3d1727Sblueswir1     printf("qemu: flush code_size=%ld nb_tbs=%d avg_tb_size=%ld\n",
739ab3d1727Sblueswir1            (unsigned long)(code_gen_ptr - code_gen_buffer),
740ab3d1727Sblueswir1            nb_tbs, nb_tbs > 0 ?
741ab3d1727Sblueswir1            ((unsigned long)(code_gen_ptr - code_gen_buffer)) / nb_tbs : 0);
742fd6ce8f6Sbellard #endif
74326a5f13bSbellard     if ((unsigned long)(code_gen_ptr - code_gen_buffer) > code_gen_buffer_size)
744a208e54aSpbrook         cpu_abort(env1, "Internal error: code buffer overflow\n");
745a208e54aSpbrook 
746fd6ce8f6Sbellard     nb_tbs = 0;
7476a00d601Sbellard 
7486a00d601Sbellard     for(env = first_cpu; env != NULL; env = env->next_cpu) {
7498a40a180Sbellard         memset (env->tb_jmp_cache, 0, TB_JMP_CACHE_SIZE * sizeof (void *));
7506a00d601Sbellard     }
7519fa3e853Sbellard 
7528a8a608fSbellard     memset (tb_phys_hash, 0, CODE_GEN_PHYS_HASH_SIZE * sizeof (void *));
753fd6ce8f6Sbellard     page_flush_tb();
7549fa3e853Sbellard 
755fd6ce8f6Sbellard     code_gen_ptr = code_gen_buffer;
756d4e8164fSbellard     /* XXX: flush processor icache at this point if cache flush is
757d4e8164fSbellard        expensive */
758e3db7226Sbellard     tb_flush_count++;
759fd6ce8f6Sbellard }
760fd6ce8f6Sbellard 
761fd6ce8f6Sbellard #ifdef DEBUG_TB_CHECK
762fd6ce8f6Sbellard 
763bc98a7efSj_mayer static void tb_invalidate_check(target_ulong address)
764fd6ce8f6Sbellard {
765fd6ce8f6Sbellard     TranslationBlock *tb;
766fd6ce8f6Sbellard     int i;
767fd6ce8f6Sbellard     address &= TARGET_PAGE_MASK;
76899773bd4Spbrook     for(i = 0;i < CODE_GEN_PHYS_HASH_SIZE; i++) {
76999773bd4Spbrook         for(tb = tb_phys_hash[i]; tb != NULL; tb = tb->phys_hash_next) {
770fd6ce8f6Sbellard             if (!(address + TARGET_PAGE_SIZE <= tb->pc ||
771fd6ce8f6Sbellard                   address >= tb->pc + tb->size)) {
7720bf9e31aSBlue Swirl                 printf("ERROR invalidate: address=" TARGET_FMT_lx
7730bf9e31aSBlue Swirl                        " PC=%08lx size=%04x\n",
77499773bd4Spbrook                        address, (long)tb->pc, tb->size);
775fd6ce8f6Sbellard             }
776fd6ce8f6Sbellard         }
777fd6ce8f6Sbellard     }
778fd6ce8f6Sbellard }
779fd6ce8f6Sbellard 
780fd6ce8f6Sbellard /* verify that all the pages have correct rights for code */
781fd6ce8f6Sbellard static void tb_page_check(void)
782fd6ce8f6Sbellard {
783fd6ce8f6Sbellard     TranslationBlock *tb;
784fd6ce8f6Sbellard     int i, flags1, flags2;
785fd6ce8f6Sbellard 
78699773bd4Spbrook     for(i = 0;i < CODE_GEN_PHYS_HASH_SIZE; i++) {
78799773bd4Spbrook         for(tb = tb_phys_hash[i]; tb != NULL; tb = tb->phys_hash_next) {
788fd6ce8f6Sbellard             flags1 = page_get_flags(tb->pc);
789fd6ce8f6Sbellard             flags2 = page_get_flags(tb->pc + tb->size - 1);
790fd6ce8f6Sbellard             if ((flags1 & PAGE_WRITE) || (flags2 & PAGE_WRITE)) {
791fd6ce8f6Sbellard                 printf("ERROR page flags: PC=%08lx size=%04x f1=%x f2=%x\n",
79299773bd4Spbrook                        (long)tb->pc, tb->size, flags1, flags2);
793fd6ce8f6Sbellard             }
794fd6ce8f6Sbellard         }
795fd6ce8f6Sbellard     }
796fd6ce8f6Sbellard }
797fd6ce8f6Sbellard 
798fd6ce8f6Sbellard #endif
799fd6ce8f6Sbellard 
800fd6ce8f6Sbellard /* invalidate one TB */
801fd6ce8f6Sbellard static inline void tb_remove(TranslationBlock **ptb, TranslationBlock *tb,
802fd6ce8f6Sbellard                              int next_offset)
803fd6ce8f6Sbellard {
804fd6ce8f6Sbellard     TranslationBlock *tb1;
805fd6ce8f6Sbellard     for(;;) {
806fd6ce8f6Sbellard         tb1 = *ptb;
807fd6ce8f6Sbellard         if (tb1 == tb) {
808fd6ce8f6Sbellard             *ptb = *(TranslationBlock **)((char *)tb1 + next_offset);
809fd6ce8f6Sbellard             break;
810fd6ce8f6Sbellard         }
811fd6ce8f6Sbellard         ptb = (TranslationBlock **)((char *)tb1 + next_offset);
812fd6ce8f6Sbellard     }
813fd6ce8f6Sbellard }
814fd6ce8f6Sbellard 
8159fa3e853Sbellard static inline void tb_page_remove(TranslationBlock **ptb, TranslationBlock *tb)
8169fa3e853Sbellard {
8179fa3e853Sbellard     TranslationBlock *tb1;
8189fa3e853Sbellard     unsigned int n1;
8199fa3e853Sbellard 
8209fa3e853Sbellard     for(;;) {
8219fa3e853Sbellard         tb1 = *ptb;
8229fa3e853Sbellard         n1 = (long)tb1 & 3;
8239fa3e853Sbellard         tb1 = (TranslationBlock *)((long)tb1 & ~3);
8249fa3e853Sbellard         if (tb1 == tb) {
8259fa3e853Sbellard             *ptb = tb1->page_next[n1];
8269fa3e853Sbellard             break;
8279fa3e853Sbellard         }
8289fa3e853Sbellard         ptb = &tb1->page_next[n1];
8299fa3e853Sbellard     }
8309fa3e853Sbellard }
8319fa3e853Sbellard 
832d4e8164fSbellard static inline void tb_jmp_remove(TranslationBlock *tb, int n)
833d4e8164fSbellard {
834d4e8164fSbellard     TranslationBlock *tb1, **ptb;
835d4e8164fSbellard     unsigned int n1;
836d4e8164fSbellard 
837d4e8164fSbellard     ptb = &tb->jmp_next[n];
838d4e8164fSbellard     tb1 = *ptb;
839d4e8164fSbellard     if (tb1) {
840d4e8164fSbellard         /* find tb(n) in circular list */
841d4e8164fSbellard         for(;;) {
842d4e8164fSbellard             tb1 = *ptb;
843d4e8164fSbellard             n1 = (long)tb1 & 3;
844d4e8164fSbellard             tb1 = (TranslationBlock *)((long)tb1 & ~3);
845d4e8164fSbellard             if (n1 == n && tb1 == tb)
846d4e8164fSbellard                 break;
847d4e8164fSbellard             if (n1 == 2) {
848d4e8164fSbellard                 ptb = &tb1->jmp_first;
849d4e8164fSbellard             } else {
850d4e8164fSbellard                 ptb = &tb1->jmp_next[n1];
851d4e8164fSbellard             }
852d4e8164fSbellard         }
853d4e8164fSbellard         /* now we can remove tb(n) from the list */
854d4e8164fSbellard         *ptb = tb->jmp_next[n];
855d4e8164fSbellard 
856d4e8164fSbellard         tb->jmp_next[n] = NULL;
857d4e8164fSbellard     }
858d4e8164fSbellard }
859d4e8164fSbellard 
860d4e8164fSbellard /* reset the jump entry 'n' of a TB so that it is not chained to
861d4e8164fSbellard    another TB */
862d4e8164fSbellard static inline void tb_reset_jump(TranslationBlock *tb, int n)
863d4e8164fSbellard {
864d4e8164fSbellard     tb_set_jmp_target(tb, n, (unsigned long)(tb->tc_ptr + tb->tb_next_offset[n]));
865d4e8164fSbellard }
866d4e8164fSbellard 
86741c1b1c9SPaul Brook void tb_phys_invalidate(TranslationBlock *tb, tb_page_addr_t page_addr)
868fd6ce8f6Sbellard {
8696a00d601Sbellard     CPUState *env;
870fd6ce8f6Sbellard     PageDesc *p;
8718a40a180Sbellard     unsigned int h, n1;
87241c1b1c9SPaul Brook     tb_page_addr_t phys_pc;
8738a40a180Sbellard     TranslationBlock *tb1, *tb2;
874fd6ce8f6Sbellard 
8759fa3e853Sbellard     /* remove the TB from the hash list */
8769fa3e853Sbellard     phys_pc = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
8779fa3e853Sbellard     h = tb_phys_hash_func(phys_pc);
8789fa3e853Sbellard     tb_remove(&tb_phys_hash[h], tb,
8799fa3e853Sbellard               offsetof(TranslationBlock, phys_hash_next));
8809fa3e853Sbellard 
8819fa3e853Sbellard     /* remove the TB from the page list */
8829fa3e853Sbellard     if (tb->page_addr[0] != page_addr) {
8839fa3e853Sbellard         p = page_find(tb->page_addr[0] >> TARGET_PAGE_BITS);
8849fa3e853Sbellard         tb_page_remove(&p->first_tb, tb);
8859fa3e853Sbellard         invalidate_page_bitmap(p);
8869fa3e853Sbellard     }
8879fa3e853Sbellard     if (tb->page_addr[1] != -1 && tb->page_addr[1] != page_addr) {
8889fa3e853Sbellard         p = page_find(tb->page_addr[1] >> TARGET_PAGE_BITS);
8899fa3e853Sbellard         tb_page_remove(&p->first_tb, tb);
8909fa3e853Sbellard         invalidate_page_bitmap(p);
8919fa3e853Sbellard     }
8929fa3e853Sbellard 
8938a40a180Sbellard     tb_invalidated_flag = 1;
8948a40a180Sbellard 
8958a40a180Sbellard     /* remove the TB from each CPU's tb_jmp_cache */
8968a40a180Sbellard     h = tb_jmp_cache_hash_func(tb->pc);
8976a00d601Sbellard     for(env = first_cpu; env != NULL; env = env->next_cpu) {
8986a00d601Sbellard         if (env->tb_jmp_cache[h] == tb)
8996a00d601Sbellard             env->tb_jmp_cache[h] = NULL;
9006a00d601Sbellard     }
9018a40a180Sbellard 
9028a40a180Sbellard     /* remove this TB from the two jump lists */
9038a40a180Sbellard     tb_jmp_remove(tb, 0);
9048a40a180Sbellard     tb_jmp_remove(tb, 1);
9058a40a180Sbellard 
9068a40a180Sbellard     /* suppress any remaining jumps to this TB */
9078a40a180Sbellard     tb1 = tb->jmp_first;
9088a40a180Sbellard     for(;;) {
9098a40a180Sbellard         n1 = (long)tb1 & 3;
9108a40a180Sbellard         if (n1 == 2)
9118a40a180Sbellard             break;
9128a40a180Sbellard         tb1 = (TranslationBlock *)((long)tb1 & ~3);
9138a40a180Sbellard         tb2 = tb1->jmp_next[n1];
9148a40a180Sbellard         tb_reset_jump(tb1, n1);
9158a40a180Sbellard         tb1->jmp_next[n1] = NULL;
9168a40a180Sbellard         tb1 = tb2;
9178a40a180Sbellard     }
9188a40a180Sbellard     tb->jmp_first = (TranslationBlock *)((long)tb | 2); /* fail safe */
9198a40a180Sbellard 
920e3db7226Sbellard     tb_phys_invalidate_count++;
9219fa3e853Sbellard }
9229fa3e853Sbellard 
9239fa3e853Sbellard static inline void set_bits(uint8_t *tab, int start, int len)
9249fa3e853Sbellard {
9259fa3e853Sbellard     int end, mask, end1;
9269fa3e853Sbellard 
9279fa3e853Sbellard     end = start + len;
9289fa3e853Sbellard     tab += start >> 3;
9299fa3e853Sbellard     mask = 0xff << (start & 7);
9309fa3e853Sbellard     if ((start & ~7) == (end & ~7)) {
9319fa3e853Sbellard         if (start < end) {
9329fa3e853Sbellard             mask &= ~(0xff << (end & 7));
9339fa3e853Sbellard             *tab |= mask;
9349fa3e853Sbellard         }
9359fa3e853Sbellard     } else {
9369fa3e853Sbellard         *tab++ |= mask;
9379fa3e853Sbellard         start = (start + 8) & ~7;
9389fa3e853Sbellard         end1 = end & ~7;
9399fa3e853Sbellard         while (start < end1) {
9409fa3e853Sbellard             *tab++ = 0xff;
9419fa3e853Sbellard             start += 8;
9429fa3e853Sbellard         }
9439fa3e853Sbellard         if (start < end) {
9449fa3e853Sbellard             mask = ~(0xff << (end & 7));
9459fa3e853Sbellard             *tab |= mask;
9469fa3e853Sbellard         }
9479fa3e853Sbellard     }
9489fa3e853Sbellard }
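/* Worked example: set_bits(tab, 3, 7) marks bits 3..9, i.e. it ORs
   0xf8 into tab[0] (bits 3..7) and 0x03 into tab[1] (bits 8..9).
   In build_page_bitmap() below, bit i set means that byte i of the
   page is covered by translated code.  */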
9499fa3e853Sbellard 
9509fa3e853Sbellard static void build_page_bitmap(PageDesc *p)
9519fa3e853Sbellard {
9529fa3e853Sbellard     int n, tb_start, tb_end;
9539fa3e853Sbellard     TranslationBlock *tb;
9549fa3e853Sbellard 
9557267c094SAnthony Liguori     p->code_bitmap = g_malloc0(TARGET_PAGE_SIZE / 8);
9569fa3e853Sbellard 
9579fa3e853Sbellard     tb = p->first_tb;
9589fa3e853Sbellard     while (tb != NULL) {
9599fa3e853Sbellard         n = (long)tb & 3;
9609fa3e853Sbellard         tb = (TranslationBlock *)((long)tb & ~3);
9619fa3e853Sbellard         /* NOTE: this is subtle as a TB may span two physical pages */
9629fa3e853Sbellard         if (n == 0) {
9639fa3e853Sbellard             /* NOTE: tb_end may be after the end of the page, but
9649fa3e853Sbellard                it is not a problem */
9659fa3e853Sbellard             tb_start = tb->pc & ~TARGET_PAGE_MASK;
9669fa3e853Sbellard             tb_end = tb_start + tb->size;
9679fa3e853Sbellard             if (tb_end > TARGET_PAGE_SIZE)
9689fa3e853Sbellard                 tb_end = TARGET_PAGE_SIZE;
9699fa3e853Sbellard         } else {
9709fa3e853Sbellard             tb_start = 0;
9719fa3e853Sbellard             tb_end = ((tb->pc + tb->size) & ~TARGET_PAGE_MASK);
9729fa3e853Sbellard         }
9739fa3e853Sbellard         set_bits(p->code_bitmap, tb_start, tb_end - tb_start);
9749fa3e853Sbellard         tb = tb->page_next[n];
9759fa3e853Sbellard     }
9769fa3e853Sbellard }
9779fa3e853Sbellard 
9782e70f6efSpbrook TranslationBlock *tb_gen_code(CPUState *env,
9792e70f6efSpbrook                               target_ulong pc, target_ulong cs_base,
9802e70f6efSpbrook                               int flags, int cflags)
981d720b93dSbellard {
982d720b93dSbellard     TranslationBlock *tb;
983d720b93dSbellard     uint8_t *tc_ptr;
98441c1b1c9SPaul Brook     tb_page_addr_t phys_pc, phys_page2;
98541c1b1c9SPaul Brook     target_ulong virt_page2;
986d720b93dSbellard     int code_gen_size;
987d720b93dSbellard 
98841c1b1c9SPaul Brook     phys_pc = get_page_addr_code(env, pc);
989c27004ecSbellard     tb = tb_alloc(pc);
990d720b93dSbellard     if (!tb) {
991d720b93dSbellard         /* flush must be done */
992d720b93dSbellard         tb_flush(env);
993d720b93dSbellard         /* cannot fail at this point */
994c27004ecSbellard         tb = tb_alloc(pc);
9952e70f6efSpbrook         /* Don't forget to invalidate previous TB info.  */
9962e70f6efSpbrook         tb_invalidated_flag = 1;
997d720b93dSbellard     }
998d720b93dSbellard     tc_ptr = code_gen_ptr;
999d720b93dSbellard     tb->tc_ptr = tc_ptr;
1000d720b93dSbellard     tb->cs_base = cs_base;
1001d720b93dSbellard     tb->flags = flags;
1002d720b93dSbellard     tb->cflags = cflags;
1003d07bde88Sblueswir1     cpu_gen_code(env, tb, &code_gen_size);
1004d720b93dSbellard     code_gen_ptr = (void *)(((unsigned long)code_gen_ptr + code_gen_size + CODE_GEN_ALIGN - 1) & ~(CODE_GEN_ALIGN - 1));
1005d720b93dSbellard 
1006d720b93dSbellard     /* check next page if needed */
1007c27004ecSbellard     virt_page2 = (pc + tb->size - 1) & TARGET_PAGE_MASK;
1008d720b93dSbellard     phys_page2 = -1;
1009c27004ecSbellard     if ((pc & TARGET_PAGE_MASK) != virt_page2) {
101041c1b1c9SPaul Brook         phys_page2 = get_page_addr_code(env, virt_page2);
1011d720b93dSbellard     }
101241c1b1c9SPaul Brook     tb_link_page(tb, phys_pc, phys_page2);
10132e70f6efSpbrook     return tb;
1014d720b93dSbellard }
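/* For example, a TB whose code starts near the end of a page can spill
   onto the following page; phys_page2 then records that second physical
   page so that tb_invalidate_phys_page_range() finds the TB from writes
   to either page.  */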
1015d720b93dSbellard 
10169fa3e853Sbellard /* invalidate all TBs which intersect with the target physical page
10179fa3e853Sbellard    starting in range [start, end). NOTE: start and end must refer to
1018d720b93dSbellard    the same physical page. 'is_cpu_write_access' should be true if called
1019d720b93dSbellard    from a real cpu write access: the virtual CPU will exit the current
1020d720b93dSbellard    TB if code is modified inside this TB. */
102141c1b1c9SPaul Brook void tb_invalidate_phys_page_range(tb_page_addr_t start, tb_page_addr_t end,
1022d720b93dSbellard                                    int is_cpu_write_access)
10239fa3e853Sbellard {
10246b917547Saliguori     TranslationBlock *tb, *tb_next, *saved_tb;
1025d720b93dSbellard     CPUState *env = cpu_single_env;
102641c1b1c9SPaul Brook     tb_page_addr_t tb_start, tb_end;
10276b917547Saliguori     PageDesc *p;
10286b917547Saliguori     int n;
10296b917547Saliguori #ifdef TARGET_HAS_PRECISE_SMC
10306b917547Saliguori     int current_tb_not_found = is_cpu_write_access;
10316b917547Saliguori     TranslationBlock *current_tb = NULL;
10326b917547Saliguori     int current_tb_modified = 0;
10336b917547Saliguori     target_ulong current_pc = 0;
10346b917547Saliguori     target_ulong current_cs_base = 0;
10356b917547Saliguori     int current_flags = 0;
10366b917547Saliguori #endif /* TARGET_HAS_PRECISE_SMC */
10379fa3e853Sbellard 
10389fa3e853Sbellard     p = page_find(start >> TARGET_PAGE_BITS);
10399fa3e853Sbellard     if (!p)
10409fa3e853Sbellard         return;
10419fa3e853Sbellard     if (!p->code_bitmap &&
1042d720b93dSbellard         ++p->code_write_count >= SMC_BITMAP_USE_THRESHOLD &&
1043d720b93dSbellard         is_cpu_write_access) {
10449fa3e853Sbellard         /* build code bitmap */
10459fa3e853Sbellard         build_page_bitmap(p);
10469fa3e853Sbellard     }
10479fa3e853Sbellard 
10489fa3e853Sbellard     /* we remove all the TBs in the range [start, end) */
10499fa3e853Sbellard     /* XXX: see if in some cases it could be faster to invalidate all the code */
10509fa3e853Sbellard     tb = p->first_tb;
10519fa3e853Sbellard     while (tb != NULL) {
10529fa3e853Sbellard         n = (long)tb & 3;
10539fa3e853Sbellard         tb = (TranslationBlock *)((long)tb & ~3);
10549fa3e853Sbellard         tb_next = tb->page_next[n];
10559fa3e853Sbellard         /* NOTE: this is subtle as a TB may span two physical pages */
10569fa3e853Sbellard         if (n == 0) {
10579fa3e853Sbellard             /* NOTE: tb_end may be after the end of the page, but
10589fa3e853Sbellard                it is not a problem */
10599fa3e853Sbellard             tb_start = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
10609fa3e853Sbellard             tb_end = tb_start + tb->size;
10619fa3e853Sbellard         } else {
10629fa3e853Sbellard             tb_start = tb->page_addr[1];
10639fa3e853Sbellard             tb_end = tb_start + ((tb->pc + tb->size) & ~TARGET_PAGE_MASK);
10649fa3e853Sbellard         }
10659fa3e853Sbellard         if (!(tb_end <= start || tb_start >= end)) {
1066d720b93dSbellard #ifdef TARGET_HAS_PRECISE_SMC
1067d720b93dSbellard             if (current_tb_not_found) {
1068d720b93dSbellard                 current_tb_not_found = 0;
1069d720b93dSbellard                 current_tb = NULL;
10702e70f6efSpbrook                 if (env->mem_io_pc) {
1071d720b93dSbellard                     /* now we have a real cpu fault */
10722e70f6efSpbrook                     current_tb = tb_find_pc(env->mem_io_pc);
1073d720b93dSbellard                 }
1074d720b93dSbellard             }
1075d720b93dSbellard             if (current_tb == tb &&
10762e70f6efSpbrook                 (current_tb->cflags & CF_COUNT_MASK) != 1) {
1077d720b93dSbellard                 /* If we are modifying the current TB, we must stop
1078d720b93dSbellard                 its execution. We could be more precise by checking
1079d720b93dSbellard                 that the modification is after the current PC, but it
1080d720b93dSbellard                 would require a specialized function to partially
1081d720b93dSbellard                 restore the CPU state */
1082d720b93dSbellard 
1083d720b93dSbellard                 current_tb_modified = 1;
1084618ba8e6SStefan Weil                 cpu_restore_state(current_tb, env, env->mem_io_pc);
10856b917547Saliguori                 cpu_get_tb_cpu_state(env, &current_pc, &current_cs_base,
10866b917547Saliguori                                      &current_flags);
1087d720b93dSbellard             }
1088d720b93dSbellard #endif /* TARGET_HAS_PRECISE_SMC */
10896f5a9f7eSbellard             /* save and clear env->current_tb to handle the case where a signal
10906f5a9f7eSbellard                occurs while doing tb_phys_invalidate() */
10916f5a9f7eSbellard             saved_tb = NULL;
10926f5a9f7eSbellard             if (env) {
1093ea1c1802Sbellard                 saved_tb = env->current_tb;
1094ea1c1802Sbellard                 env->current_tb = NULL;
10956f5a9f7eSbellard             }
10969fa3e853Sbellard             tb_phys_invalidate(tb, -1);
10976f5a9f7eSbellard             if (env) {
1098ea1c1802Sbellard                 env->current_tb = saved_tb;
1099ea1c1802Sbellard                 if (env->interrupt_request && env->current_tb)
1100ea1c1802Sbellard                     cpu_interrupt(env, env->interrupt_request);
11019fa3e853Sbellard             }
11026f5a9f7eSbellard         }
11039fa3e853Sbellard         tb = tb_next;
11049fa3e853Sbellard     }
11059fa3e853Sbellard #if !defined(CONFIG_USER_ONLY)
11069fa3e853Sbellard     /* if no code remains, there is no need to keep using slow writes */
11079fa3e853Sbellard     if (!p->first_tb) {
11089fa3e853Sbellard         invalidate_page_bitmap(p);
1109d720b93dSbellard         if (is_cpu_write_access) {
11102e70f6efSpbrook             tlb_unprotect_code_phys(env, start, env->mem_io_vaddr);
1111d720b93dSbellard         }
1112d720b93dSbellard     }
1113d720b93dSbellard #endif
1114d720b93dSbellard #ifdef TARGET_HAS_PRECISE_SMC
1115d720b93dSbellard     if (current_tb_modified) {
1116d720b93dSbellard         /* we generate a block containing just the instruction that
1117d720b93dSbellard            modified the memory, which guarantees that the block cannot
1118d720b93dSbellard            modify itself */
1119ea1c1802Sbellard         env->current_tb = NULL;
11202e70f6efSpbrook         tb_gen_code(env, current_pc, current_cs_base, current_flags, 1);
1121d720b93dSbellard         cpu_resume_from_signal(env, NULL);
11229fa3e853Sbellard     }
11239fa3e853Sbellard #endif
11249fa3e853Sbellard }
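
/* Illustrative sketch (not compiled): callers that write to guest RAM
   behind the CPU's back are expected to invalidate any translated code
   in the written range first, roughly as below.  The function name and
   parameters are hypothetical; only the tb_invalidate_phys_page_range()
   and qemu_get_ram_ptr() calls reflect existing APIs. */
#if 0
static void example_dma_write(ram_addr_t ram_addr, const void *buf, int len)
{
    /* drop every TB translated from [ram_addr, ram_addr + len) */
    tb_invalidate_phys_page_range(ram_addr, ram_addr + len, 0);
    memcpy(qemu_get_ram_ptr(ram_addr), buf, len);
}
#endif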
11259fa3e853Sbellard 
11269fa3e853Sbellard /* len must be <= 8 and start must be a multiple of len */
112741c1b1c9SPaul Brook static inline void tb_invalidate_phys_page_fast(tb_page_addr_t start, int len)
11289fa3e853Sbellard {
11299fa3e853Sbellard     PageDesc *p;
11309fa3e853Sbellard     int offset, b;
113159817ccbSbellard #if 0
1132a4193c8aSbellard     if (1) {
113393fcfe39Saliguori         qemu_log("modifying code at 0x%x size=%d EIP=%x PC=%08x\n",
11342e70f6efSpbrook                   cpu_single_env->mem_io_vaddr, len,
1135a4193c8aSbellard                   cpu_single_env->eip,
1136a4193c8aSbellard                   cpu_single_env->eip + (long)cpu_single_env->segs[R_CS].base);
1137a4193c8aSbellard     }
113859817ccbSbellard #endif
11399fa3e853Sbellard     p = page_find(start >> TARGET_PAGE_BITS);
11409fa3e853Sbellard     if (!p)
11419fa3e853Sbellard         return;
11429fa3e853Sbellard     if (p->code_bitmap) {
11439fa3e853Sbellard         offset = start & ~TARGET_PAGE_MASK;
11449fa3e853Sbellard         b = p->code_bitmap[offset >> 3] >> (offset & 7);
11459fa3e853Sbellard         if (b & ((1 << len) - 1))
11469fa3e853Sbellard             goto do_invalidate;
11479fa3e853Sbellard     } else {
11489fa3e853Sbellard     do_invalidate:
1149d720b93dSbellard         tb_invalidate_phys_page_range(start, start + len, 1);
11509fa3e853Sbellard     }
11519fa3e853Sbellard }
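
/* Worked example of the bitmap test above (illustration only): for a
   len == 4 write at page offset 0x105, offset >> 3 selects byte 0x20 of
   code_bitmap and offset & 7 == 5 shifts its contents down, so the low
   four bits of 'b' describe page offsets 0x105..0x108.  Any of those
   bits being set means translated code overlaps the write, and the
   whole range is invalidated via the slow path. */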
11529fa3e853Sbellard 
11539fa3e853Sbellard #if !defined(CONFIG_SOFTMMU)
115441c1b1c9SPaul Brook static void tb_invalidate_phys_page(tb_page_addr_t addr,
1155d720b93dSbellard                                     unsigned long pc, void *puc)
11569fa3e853Sbellard {
11576b917547Saliguori     TranslationBlock *tb;
11589fa3e853Sbellard     PageDesc *p;
11596b917547Saliguori     int n;
1160d720b93dSbellard #ifdef TARGET_HAS_PRECISE_SMC
11616b917547Saliguori     TranslationBlock *current_tb = NULL;
1162d720b93dSbellard     CPUState *env = cpu_single_env;
11636b917547Saliguori     int current_tb_modified = 0;
11646b917547Saliguori     target_ulong current_pc = 0;
11656b917547Saliguori     target_ulong current_cs_base = 0;
11666b917547Saliguori     int current_flags = 0;
1167d720b93dSbellard #endif
11689fa3e853Sbellard 
11699fa3e853Sbellard     addr &= TARGET_PAGE_MASK;
11709fa3e853Sbellard     p = page_find(addr >> TARGET_PAGE_BITS);
1171fd6ce8f6Sbellard     if (!p)
1172fd6ce8f6Sbellard         return;
1173fd6ce8f6Sbellard     tb = p->first_tb;
1174d720b93dSbellard #ifdef TARGET_HAS_PRECISE_SMC
1175d720b93dSbellard     if (tb && pc != 0) {
1176d720b93dSbellard         current_tb = tb_find_pc(pc);
1177d720b93dSbellard     }
1178d720b93dSbellard #endif
1179fd6ce8f6Sbellard     while (tb != NULL) {
11809fa3e853Sbellard         n = (long)tb & 3;
11819fa3e853Sbellard         tb = (TranslationBlock *)((long)tb & ~3);
1182d720b93dSbellard #ifdef TARGET_HAS_PRECISE_SMC
1183d720b93dSbellard         if (current_tb == tb &&
11842e70f6efSpbrook             (current_tb->cflags & CF_COUNT_MASK) != 1) {
1185d720b93dSbellard             /* If we are modifying the current TB, we must stop
1186d720b93dSbellard                its execution. We could be more precise by checking
1187d720b93dSbellard                that the modification is after the current PC, but it
1188d720b93dSbellard                would require a specialized function to partially
1189d720b93dSbellard                restore the CPU state */
1190d720b93dSbellard 
1191d720b93dSbellard             current_tb_modified = 1;
1192618ba8e6SStefan Weil             cpu_restore_state(current_tb, env, pc);
11936b917547Saliguori             cpu_get_tb_cpu_state(env, &current_pc, &current_cs_base,
11946b917547Saliguori                                  &current_flags);
1195d720b93dSbellard         }
1196d720b93dSbellard #endif /* TARGET_HAS_PRECISE_SMC */
11979fa3e853Sbellard         tb_phys_invalidate(tb, addr);
11989fa3e853Sbellard         tb = tb->page_next[n];
1199fd6ce8f6Sbellard     }
1200fd6ce8f6Sbellard     p->first_tb = NULL;
1201d720b93dSbellard #ifdef TARGET_HAS_PRECISE_SMC
1202d720b93dSbellard     if (current_tb_modified) {
1203d720b93dSbellard         /* we generate a block containing just the instruction that
1204d720b93dSbellard            modified the memory, which guarantees that the block cannot
1205d720b93dSbellard            modify itself */
1206ea1c1802Sbellard         env->current_tb = NULL;
12072e70f6efSpbrook         tb_gen_code(env, current_pc, current_cs_base, current_flags, 1);
1208d720b93dSbellard         cpu_resume_from_signal(env, puc);
1209d720b93dSbellard     }
1210d720b93dSbellard #endif
1211fd6ce8f6Sbellard }
12129fa3e853Sbellard #endif
1213fd6ce8f6Sbellard 
1214fd6ce8f6Sbellard /* add the tb in the target page and protect it if necessary */
12159fa3e853Sbellard static inline void tb_alloc_page(TranslationBlock *tb,
121641c1b1c9SPaul Brook                                  unsigned int n, tb_page_addr_t page_addr)
1217fd6ce8f6Sbellard {
1218fd6ce8f6Sbellard     PageDesc *p;
12194429ab44SJuan Quintela #ifndef CONFIG_USER_ONLY
12204429ab44SJuan Quintela     bool page_already_protected;
12214429ab44SJuan Quintela #endif
12229fa3e853Sbellard 
12239fa3e853Sbellard     tb->page_addr[n] = page_addr;
12245cd2c5b6SRichard Henderson     p = page_find_alloc(page_addr >> TARGET_PAGE_BITS, 1);
12259fa3e853Sbellard     tb->page_next[n] = p->first_tb;
12264429ab44SJuan Quintela #ifndef CONFIG_USER_ONLY
12274429ab44SJuan Quintela     page_already_protected = p->first_tb != NULL;
12284429ab44SJuan Quintela #endif
12299fa3e853Sbellard     p->first_tb = (TranslationBlock *)((long)tb | n);
12309fa3e853Sbellard     invalidate_page_bitmap(p);
12319fa3e853Sbellard 
1232107db443Sbellard #if defined(TARGET_HAS_SMC) || 1
1233d720b93dSbellard 
12349fa3e853Sbellard #if defined(CONFIG_USER_ONLY)
12359fa3e853Sbellard     if (p->flags & PAGE_WRITE) {
123653a5960aSpbrook         target_ulong addr;
123753a5960aSpbrook         PageDesc *p2;
1238fd6ce8f6Sbellard         int prot;
1239fd6ce8f6Sbellard 
1240fd6ce8f6Sbellard         /* force the host page to be non-writable (writes will take a
1241fd6ce8f6Sbellard            page fault + mprotect overhead) */
124253a5960aSpbrook         page_addr &= qemu_host_page_mask;
1243fd6ce8f6Sbellard         prot = 0;
124453a5960aSpbrook         for(addr = page_addr; addr < page_addr + qemu_host_page_size;
124553a5960aSpbrook             addr += TARGET_PAGE_SIZE) {
124753a5960aSpbrook             p2 = page_find(addr >> TARGET_PAGE_BITS);
124853a5960aSpbrook             if (!p2)
124953a5960aSpbrook                 continue;
125053a5960aSpbrook             prot |= p2->flags;
125153a5960aSpbrook             p2->flags &= ~PAGE_WRITE;
125253a5960aSpbrook         }
125353a5960aSpbrook         mprotect(g2h(page_addr), qemu_host_page_size,
1254fd6ce8f6Sbellard                  (prot & PAGE_BITS) & ~PAGE_WRITE);
1255fd6ce8f6Sbellard #ifdef DEBUG_TB_INVALIDATE
1256ab3d1727Sblueswir1         printf("protecting code page: 0x" TARGET_FMT_lx "\n",
125753a5960aSpbrook                page_addr);
1258fd6ce8f6Sbellard #endif
1259fd6ce8f6Sbellard     }
12609fa3e853Sbellard #else
12619fa3e853Sbellard     /* if some code is already present, then the pages are already
12629fa3e853Sbellard        protected. So we handle the case where only the first TB is
12639fa3e853Sbellard        allocated in a physical page */
12644429ab44SJuan Quintela     if (!page_already_protected) {
12656a00d601Sbellard         tlb_protect_code(page_addr);
12669fa3e853Sbellard     }
12679fa3e853Sbellard #endif
1268d720b93dSbellard 
1269d720b93dSbellard #endif /* TARGET_HAS_SMC */
1270fd6ce8f6Sbellard }
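
/* Illustrative sketch (not compiled): p->first_tb above stores the page
   index (0 or 1) in the two low bits of each TranslationBlock pointer,
   which relies on TBs being at least 4-byte aligned.  Decoding an entry
   while walking the per-page list looks like this: */
#if 0
static void example_walk_page_tbs(PageDesc *p)
{
    TranslationBlock *tb = p->first_tb;

    while (tb != NULL) {
        int n = (long)tb & 3;                      /* which page slot */
        tb = (TranslationBlock *)((long)tb & ~3);  /* the real pointer */
        /* ... inspect tb ... */
        tb = tb->page_next[n];
    }
}
#endif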
1271fd6ce8f6Sbellard 
12729fa3e853Sbellard /* add a new TB and link it to the physical page tables. phys_page2 is
12739fa3e853Sbellard    (-1) to indicate that only one page contains the TB. */
127441c1b1c9SPaul Brook void tb_link_page(TranslationBlock *tb,
127541c1b1c9SPaul Brook                   tb_page_addr_t phys_pc, tb_page_addr_t phys_page2)
1276d4e8164fSbellard {
12779fa3e853Sbellard     unsigned int h;
12789fa3e853Sbellard     TranslationBlock **ptb;
12799fa3e853Sbellard 
1280c8a706feSpbrook     /* Grab the mmap lock to stop another thread invalidating this TB
1281c8a706feSpbrook        before we are done.  */
1282c8a706feSpbrook     mmap_lock();
12839fa3e853Sbellard     /* add in the physical hash table */
12849fa3e853Sbellard     h = tb_phys_hash_func(phys_pc);
12859fa3e853Sbellard     ptb = &tb_phys_hash[h];
12869fa3e853Sbellard     tb->phys_hash_next = *ptb;
12879fa3e853Sbellard     *ptb = tb;
1288fd6ce8f6Sbellard 
1289fd6ce8f6Sbellard     /* add in the page list */
12909fa3e853Sbellard     tb_alloc_page(tb, 0, phys_pc & TARGET_PAGE_MASK);
12919fa3e853Sbellard     if (phys_page2 != -1)
12929fa3e853Sbellard         tb_alloc_page(tb, 1, phys_page2);
12939fa3e853Sbellard     else
12949fa3e853Sbellard         tb->page_addr[1] = -1;
12959fa3e853Sbellard 
1296d4e8164fSbellard     tb->jmp_first = (TranslationBlock *)((long)tb | 2);
1297d4e8164fSbellard     tb->jmp_next[0] = NULL;
1298d4e8164fSbellard     tb->jmp_next[1] = NULL;
1299d4e8164fSbellard 
1300d4e8164fSbellard     /* init original jump addresses */
1301d4e8164fSbellard     if (tb->tb_next_offset[0] != 0xffff)
1302d4e8164fSbellard         tb_reset_jump(tb, 0);
1303d4e8164fSbellard     if (tb->tb_next_offset[1] != 0xffff)
1304d4e8164fSbellard         tb_reset_jump(tb, 1);
13058a40a180Sbellard 
13068a40a180Sbellard #ifdef DEBUG_TB_CHECK
13078a40a180Sbellard     tb_page_check();
13088a40a180Sbellard #endif
1309c8a706feSpbrook     mmap_unlock();
1310fd6ce8f6Sbellard }
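
/* Sketch of a typical caller (assumption: this mirrors tb_gen_code, which
   lives elsewhere): a TB whose guest code crosses a page boundary passes
   the physical address of the second page, otherwise -1. */
#if 0
static void example_link_tb(CPUState *env, TranslationBlock *tb,
                            tb_page_addr_t phys_pc)
{
    tb_page_addr_t phys_page2 = -1;
    target_ulong virt_page2 = (tb->pc + tb->size - 1) & TARGET_PAGE_MASK;

    if ((tb->pc & TARGET_PAGE_MASK) != virt_page2) {
        phys_page2 = get_page_addr_code(env, virt_page2);
    }
    tb_link_page(tb, phys_pc, phys_page2);
}
#endif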
1311fd6ce8f6Sbellard 
1312a513fe19Sbellard /* find the TB 'tb' such that tb[0].tc_ptr <= tc_ptr <
1313a513fe19Sbellard    tb[1].tc_ptr. Return NULL if not found */
1314a513fe19Sbellard TranslationBlock *tb_find_pc(unsigned long tc_ptr)
1315a513fe19Sbellard {
1316a513fe19Sbellard     int m_min, m_max, m;
1317a513fe19Sbellard     unsigned long v;
1318a513fe19Sbellard     TranslationBlock *tb;
1319a513fe19Sbellard 
1320a513fe19Sbellard     if (nb_tbs <= 0)
1321a513fe19Sbellard         return NULL;
1322a513fe19Sbellard     if (tc_ptr < (unsigned long)code_gen_buffer ||
1323a513fe19Sbellard         tc_ptr >= (unsigned long)code_gen_ptr)
1324a513fe19Sbellard         return NULL;
1325a513fe19Sbellard     /* binary search (cf Knuth) */
1326a513fe19Sbellard     m_min = 0;
1327a513fe19Sbellard     m_max = nb_tbs - 1;
1328a513fe19Sbellard     while (m_min <= m_max) {
1329a513fe19Sbellard         m = (m_min + m_max) >> 1;
1330a513fe19Sbellard         tb = &tbs[m];
1331a513fe19Sbellard         v = (unsigned long)tb->tc_ptr;
1332a513fe19Sbellard         if (v == tc_ptr)
1333a513fe19Sbellard             return tb;
1334a513fe19Sbellard         else if (tc_ptr < v) {
1335a513fe19Sbellard             m_max = m - 1;
1336a513fe19Sbellard         } else {
1337a513fe19Sbellard             m_min = m + 1;
1338a513fe19Sbellard         }
1339a513fe19Sbellard     }
1340a513fe19Sbellard     return &tbs[m_max];
1341a513fe19Sbellard }
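
/* Usage sketch (illustration only): the SMC handlers above use this to
   map a faulting host PC back to the TB whose generated code contains
   it, then roll the CPU state back to a guest instruction boundary.
   'host_pc' is a hypothetical value taken from a signal frame. */
#if 0
static void example_restore(CPUState *env, unsigned long host_pc)
{
    TranslationBlock *tb = tb_find_pc(host_pc);

    if (tb) {
        cpu_restore_state(tb, env, host_pc);
    }
}
#endif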
13427501267eSbellard 
1343ea041c0eSbellard static void tb_reset_jump_recursive(TranslationBlock *tb);
1344ea041c0eSbellard 
1345ea041c0eSbellard static inline void tb_reset_jump_recursive2(TranslationBlock *tb, int n)
1346ea041c0eSbellard {
1347ea041c0eSbellard     TranslationBlock *tb1, *tb_next, **ptb;
1348ea041c0eSbellard     unsigned int n1;
1349ea041c0eSbellard 
1350ea041c0eSbellard     tb1 = tb->jmp_next[n];
1351ea041c0eSbellard     if (tb1 != NULL) {
1352ea041c0eSbellard         /* find head of list */
1353ea041c0eSbellard         for(;;) {
1354ea041c0eSbellard             n1 = (long)tb1 & 3;
1355ea041c0eSbellard             tb1 = (TranslationBlock *)((long)tb1 & ~3);
1356ea041c0eSbellard             if (n1 == 2)
1357ea041c0eSbellard                 break;
1358ea041c0eSbellard             tb1 = tb1->jmp_next[n1];
1359ea041c0eSbellard         }
1360ea041c0eSbellard         /* we are now sure that tb jumps to tb1 */
1361ea041c0eSbellard         tb_next = tb1;
1362ea041c0eSbellard 
1363ea041c0eSbellard         /* remove tb from the jmp_first list */
1364ea041c0eSbellard         ptb = &tb_next->jmp_first;
1365ea041c0eSbellard         for(;;) {
1366ea041c0eSbellard             tb1 = *ptb;
1367ea041c0eSbellard             n1 = (long)tb1 & 3;
1368ea041c0eSbellard             tb1 = (TranslationBlock *)((long)tb1 & ~3);
1369ea041c0eSbellard             if (n1 == n && tb1 == tb)
1370ea041c0eSbellard                 break;
1371ea041c0eSbellard             ptb = &tb1->jmp_next[n1];
1372ea041c0eSbellard         }
1373ea041c0eSbellard         *ptb = tb->jmp_next[n];
1374ea041c0eSbellard         tb->jmp_next[n] = NULL;
1375ea041c0eSbellard 
1376ea041c0eSbellard         /* suppress the jump to next tb in generated code */
1377ea041c0eSbellard         tb_reset_jump(tb, n);
1378ea041c0eSbellard 
13790124311eSbellard         /* suppress jumps in the tb we could have jumped to */
1380ea041c0eSbellard         tb_reset_jump_recursive(tb_next);
1381ea041c0eSbellard     }
1382ea041c0eSbellard }
1383ea041c0eSbellard 
1384ea041c0eSbellard static void tb_reset_jump_recursive(TranslationBlock *tb)
1385ea041c0eSbellard {
1386ea041c0eSbellard     tb_reset_jump_recursive2(tb, 0);
1387ea041c0eSbellard     tb_reset_jump_recursive2(tb, 1);
1388ea041c0eSbellard }
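
/* Note on the encoding walked above: tb->jmp_first chains the TBs that
   jump into this one, tagging each pointer with the jump slot (0 or 1)
   in its low bits; the tag value 2 marks the list head (the TB itself,
   set up in tb_link_page), which is why the head-finding loop stops at
   n1 == 2. */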
1389ea041c0eSbellard 
13901fddef4bSbellard #if defined(TARGET_HAS_ICE)
139194df27fdSPaul Brook #if defined(CONFIG_USER_ONLY)
139294df27fdSPaul Brook static void breakpoint_invalidate(CPUState *env, target_ulong pc)
139394df27fdSPaul Brook {
139494df27fdSPaul Brook     tb_invalidate_phys_page_range(pc, pc + 1, 0);
139594df27fdSPaul Brook }
139694df27fdSPaul Brook #else
1397d720b93dSbellard static void breakpoint_invalidate(CPUState *env, target_ulong pc)
1398d720b93dSbellard {
1399c227f099SAnthony Liguori     target_phys_addr_t addr;
14009b3c35e0Sj_mayer     target_ulong pd;
1401c227f099SAnthony Liguori     ram_addr_t ram_addr;
1402c2f07f81Spbrook     PhysPageDesc *p;
1403d720b93dSbellard 
1404c2f07f81Spbrook     addr = cpu_get_phys_page_debug(env, pc);
1405c2f07f81Spbrook     p = phys_page_find(addr >> TARGET_PAGE_BITS);
1406c2f07f81Spbrook     if (!p) {
1407c2f07f81Spbrook         pd = IO_MEM_UNASSIGNED;
1408c2f07f81Spbrook     } else {
1409c2f07f81Spbrook         pd = p->phys_offset;
1410c2f07f81Spbrook     }
1411c2f07f81Spbrook     ram_addr = (pd & TARGET_PAGE_MASK) | (pc & ~TARGET_PAGE_MASK);
1412706cd4b5Spbrook     tb_invalidate_phys_page_range(ram_addr, ram_addr + 1, 0);
1413d720b93dSbellard }
1414c27004ecSbellard #endif
141594df27fdSPaul Brook #endif /* TARGET_HAS_ICE */
1416d720b93dSbellard 
1417c527ee8fSPaul Brook #if defined(CONFIG_USER_ONLY)
1418c527ee8fSPaul Brook void cpu_watchpoint_remove_all(CPUState *env, int mask)
1419c527ee8fSPaul Brook {
1421c527ee8fSPaul Brook }
1422c527ee8fSPaul Brook 
1423c527ee8fSPaul Brook int cpu_watchpoint_insert(CPUState *env, target_ulong addr, target_ulong len,
1424c527ee8fSPaul Brook                           int flags, CPUWatchpoint **watchpoint)
1425c527ee8fSPaul Brook {
1426c527ee8fSPaul Brook     return -ENOSYS;
1427c527ee8fSPaul Brook }
1428c527ee8fSPaul Brook #else
14296658ffb8Spbrook /* Add a watchpoint.  */
1430a1d1bb31Saliguori int cpu_watchpoint_insert(CPUState *env, target_ulong addr, target_ulong len,
1431a1d1bb31Saliguori                           int flags, CPUWatchpoint **watchpoint)
14326658ffb8Spbrook {
1433b4051334Saliguori     target_ulong len_mask = ~(len - 1);
1434c0ce998eSaliguori     CPUWatchpoint *wp;
14356658ffb8Spbrook 
1436b4051334Saliguori     /* sanity checks: allow power-of-2 lengths, deny unaligned watchpoints */
1437b4051334Saliguori     if ((len != 1 && len != 2 && len != 4 && len != 8) || (addr & ~len_mask)) {
1438b4051334Saliguori         fprintf(stderr, "qemu: tried to set invalid watchpoint at "
1439b4051334Saliguori                 TARGET_FMT_lx ", len=" TARGET_FMT_lu "\n", addr, len);
1440b4051334Saliguori         return -EINVAL;
1441b4051334Saliguori     }
14427267c094SAnthony Liguori     wp = g_malloc(sizeof(*wp));
14436658ffb8Spbrook 
1444a1d1bb31Saliguori     wp->vaddr = addr;
1445b4051334Saliguori     wp->len_mask = len_mask;
1446a1d1bb31Saliguori     wp->flags = flags;
1447a1d1bb31Saliguori 
14482dc9f411Saliguori     /* keep all GDB-injected watchpoints in front */
1449c0ce998eSaliguori     if (flags & BP_GDB)
145072cf2d4fSBlue Swirl         QTAILQ_INSERT_HEAD(&env->watchpoints, wp, entry);
1451c0ce998eSaliguori     else
145272cf2d4fSBlue Swirl         QTAILQ_INSERT_TAIL(&env->watchpoints, wp, entry);
1453a1d1bb31Saliguori 
14546658ffb8Spbrook     tlb_flush_page(env, addr);
1455a1d1bb31Saliguori 
1456a1d1bb31Saliguori     if (watchpoint)
1457a1d1bb31Saliguori         *watchpoint = wp;
1458a1d1bb31Saliguori     return 0;
14596658ffb8Spbrook }
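
/* Usage sketch (illustration only; the address and length are made up):
   install a 4-byte write watchpoint and later drop it by reference.
   Watchpoint hits are reported back as debug exceptions. */
#if 0
static void example_watch(CPUState *env)
{
    CPUWatchpoint *wp;

    if (cpu_watchpoint_insert(env, 0x1000, 4, BP_MEM_WRITE, &wp) == 0) {
        /* ... run the guest ... */
        cpu_watchpoint_remove_by_ref(env, wp);
    }
}
#endif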
14606658ffb8Spbrook 
1461a1d1bb31Saliguori /* Remove a specific watchpoint.  */
1462a1d1bb31Saliguori int cpu_watchpoint_remove(CPUState *env, target_ulong addr, target_ulong len,
1463a1d1bb31Saliguori                           int flags)
14646658ffb8Spbrook {
1465b4051334Saliguori     target_ulong len_mask = ~(len - 1);
1466a1d1bb31Saliguori     CPUWatchpoint *wp;
14676658ffb8Spbrook 
146872cf2d4fSBlue Swirl     QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
1469b4051334Saliguori         if (addr == wp->vaddr && len_mask == wp->len_mask
14706e140f28Saliguori                 && flags == (wp->flags & ~BP_WATCHPOINT_HIT)) {
1471a1d1bb31Saliguori             cpu_watchpoint_remove_by_ref(env, wp);
14726658ffb8Spbrook             return 0;
14736658ffb8Spbrook         }
14746658ffb8Spbrook     }
1475a1d1bb31Saliguori     return -ENOENT;
14766658ffb8Spbrook }
14776658ffb8Spbrook 
1478a1d1bb31Saliguori /* Remove a specific watchpoint by reference.  */
1479a1d1bb31Saliguori void cpu_watchpoint_remove_by_ref(CPUState *env, CPUWatchpoint *watchpoint)
1480a1d1bb31Saliguori {
148172cf2d4fSBlue Swirl     QTAILQ_REMOVE(&env->watchpoints, watchpoint, entry);
14827d03f82fSedgar_igl 
1483a1d1bb31Saliguori     tlb_flush_page(env, watchpoint->vaddr);
1484a1d1bb31Saliguori 
14857267c094SAnthony Liguori     g_free(watchpoint);
14867d03f82fSedgar_igl }
14877d03f82fSedgar_igl 
1488a1d1bb31Saliguori /* Remove all matching watchpoints.  */
1489a1d1bb31Saliguori void cpu_watchpoint_remove_all(CPUState *env, int mask)
1490a1d1bb31Saliguori {
1491c0ce998eSaliguori     CPUWatchpoint *wp, *next;
1492a1d1bb31Saliguori 
149372cf2d4fSBlue Swirl     QTAILQ_FOREACH_SAFE(wp, &env->watchpoints, entry, next) {
1494a1d1bb31Saliguori         if (wp->flags & mask)
1495a1d1bb31Saliguori             cpu_watchpoint_remove_by_ref(env, wp);
1496a1d1bb31Saliguori     }
1497c0ce998eSaliguori }
1498c527ee8fSPaul Brook #endif
1499a1d1bb31Saliguori 
1500a1d1bb31Saliguori /* Add a breakpoint.  */
1501a1d1bb31Saliguori int cpu_breakpoint_insert(CPUState *env, target_ulong pc, int flags,
1502a1d1bb31Saliguori                           CPUBreakpoint **breakpoint)
15034c3a88a2Sbellard {
15041fddef4bSbellard #if defined(TARGET_HAS_ICE)
1505c0ce998eSaliguori     CPUBreakpoint *bp;
15064c3a88a2Sbellard 
15077267c094SAnthony Liguori     bp = g_malloc(sizeof(*bp));
15084c3a88a2Sbellard 
1509a1d1bb31Saliguori     bp->pc = pc;
1510a1d1bb31Saliguori     bp->flags = flags;
1511a1d1bb31Saliguori 
15122dc9f411Saliguori     /* keep all GDB-injected breakpoints in front */
1513c0ce998eSaliguori     if (flags & BP_GDB)
151472cf2d4fSBlue Swirl         QTAILQ_INSERT_HEAD(&env->breakpoints, bp, entry);
1515c0ce998eSaliguori     else
151672cf2d4fSBlue Swirl         QTAILQ_INSERT_TAIL(&env->breakpoints, bp, entry);
1517d720b93dSbellard 
1518d720b93dSbellard     breakpoint_invalidate(env, pc);
1519a1d1bb31Saliguori 
1520a1d1bb31Saliguori     if (breakpoint)
1521a1d1bb31Saliguori         *breakpoint = bp;
15224c3a88a2Sbellard     return 0;
15234c3a88a2Sbellard #else
1524a1d1bb31Saliguori     return -ENOSYS;
15254c3a88a2Sbellard #endif
15264c3a88a2Sbellard }
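
/* Usage sketch (illustration only): a gdbstub-style insert/remove pair.
   'pc' is a hypothetical guest address. */
#if 0
static void example_break(CPUState *env, target_ulong pc)
{
    if (cpu_breakpoint_insert(env, pc, BP_GDB, NULL) == 0) {
        /* ... resume the guest; see breakpoint_invalidate() above ... */
        cpu_breakpoint_remove(env, pc, BP_GDB);
    }
}
#endif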
15274c3a88a2Sbellard 
1528a1d1bb31Saliguori /* Remove a specific breakpoint.  */
1529a1d1bb31Saliguori int cpu_breakpoint_remove(CPUState *env, target_ulong pc, int flags)
1530a1d1bb31Saliguori {
15317d03f82fSedgar_igl #if defined(TARGET_HAS_ICE)
1532a1d1bb31Saliguori     CPUBreakpoint *bp;
1533a1d1bb31Saliguori 
153472cf2d4fSBlue Swirl     QTAILQ_FOREACH(bp, &env->breakpoints, entry) {
1535a1d1bb31Saliguori         if (bp->pc == pc && bp->flags == flags) {
1536a1d1bb31Saliguori             cpu_breakpoint_remove_by_ref(env, bp);
1537a1d1bb31Saliguori             return 0;
15387d03f82fSedgar_igl         }
1539a1d1bb31Saliguori     }
1540a1d1bb31Saliguori     return -ENOENT;
1541a1d1bb31Saliguori #else
1542a1d1bb31Saliguori     return -ENOSYS;
15437d03f82fSedgar_igl #endif
15447d03f82fSedgar_igl }
15457d03f82fSedgar_igl 
1546a1d1bb31Saliguori /* Remove a specific breakpoint by reference.  */
1547a1d1bb31Saliguori void cpu_breakpoint_remove_by_ref(CPUState *env, CPUBreakpoint *breakpoint)
15484c3a88a2Sbellard {
15491fddef4bSbellard #if defined(TARGET_HAS_ICE)
155072cf2d4fSBlue Swirl     QTAILQ_REMOVE(&env->breakpoints, breakpoint, entry);
1551d720b93dSbellard 
1552a1d1bb31Saliguori     breakpoint_invalidate(env, breakpoint->pc);
1553a1d1bb31Saliguori 
15547267c094SAnthony Liguori     g_free(breakpoint);
1555a1d1bb31Saliguori #endif
1556a1d1bb31Saliguori }
1557a1d1bb31Saliguori 
1558a1d1bb31Saliguori /* Remove all matching breakpoints. */
1559a1d1bb31Saliguori void cpu_breakpoint_remove_all(CPUState *env, int mask)
1560a1d1bb31Saliguori {
1561a1d1bb31Saliguori #if defined(TARGET_HAS_ICE)
1562c0ce998eSaliguori     CPUBreakpoint *bp, *next;
1563a1d1bb31Saliguori 
156472cf2d4fSBlue Swirl     QTAILQ_FOREACH_SAFE(bp, &env->breakpoints, entry, next) {
1565a1d1bb31Saliguori         if (bp->flags & mask)
1566a1d1bb31Saliguori             cpu_breakpoint_remove_by_ref(env, bp);
1567c0ce998eSaliguori     }
15684c3a88a2Sbellard #endif
15694c3a88a2Sbellard }
15704c3a88a2Sbellard 
1571c33a346eSbellard /* enable or disable single-step mode. EXCP_DEBUG is returned by the
1572c33a346eSbellard    CPU loop after each instruction */
1573c33a346eSbellard void cpu_single_step(CPUState *env, int enabled)
1574c33a346eSbellard {
15751fddef4bSbellard #if defined(TARGET_HAS_ICE)
1576c33a346eSbellard     if (env->singlestep_enabled != enabled) {
1577c33a346eSbellard         env->singlestep_enabled = enabled;
1578e22a25c9Saliguori         if (kvm_enabled())
1579e22a25c9Saliguori             kvm_update_guest_debug(env, 0);
1580e22a25c9Saliguori         else {
1581ccbb4d44SStuart Brady             /* must flush all the translated code to avoid inconsistencies */
15829fa3e853Sbellard             /* XXX: only flush what is necessary */
15830124311eSbellard             tb_flush(env);
1584c33a346eSbellard         }
1585e22a25c9Saliguori     }
1586c33a346eSbellard #endif
1587c33a346eSbellard }
1588c33a346eSbellard 
158934865134Sbellard /* enable or disable low-level logging */
159034865134Sbellard void cpu_set_log(int log_flags)
159134865134Sbellard {
159234865134Sbellard     loglevel = log_flags;
159334865134Sbellard     if (loglevel && !logfile) {
159411fcfab4Spbrook         logfile = fopen(logfilename, log_append ? "a" : "w");
159534865134Sbellard         if (!logfile) {
159634865134Sbellard             perror(logfilename);
159734865134Sbellard             _exit(1);
159834865134Sbellard         }
15999fa3e853Sbellard #if !defined(CONFIG_SOFTMMU)
16009fa3e853Sbellard         /* must avoid glibc's use of mmap() by setting the buffer "by hand" */
16019fa3e853Sbellard         {
1602b55266b5Sblueswir1             static char logfile_buf[4096];
16039fa3e853Sbellard             setvbuf(logfile, logfile_buf, _IOLBF, sizeof(logfile_buf));
16049fa3e853Sbellard         }
1605daf767b1SStefan Weil #elif defined(_WIN32)
1606daf767b1SStefan Weil         /* Win32 doesn't support line-buffering, so use unbuffered output. */
1607daf767b1SStefan Weil         setvbuf(logfile, NULL, _IONBF, 0);
1608daf767b1SStefan Weil #else
160934865134Sbellard         setvbuf(logfile, NULL, _IOLBF, 0);
16109fa3e853Sbellard #endif
1611e735b91cSpbrook         log_append = 1;
1612e735b91cSpbrook     }
1613e735b91cSpbrook     if (!loglevel && logfile) {
1614e735b91cSpbrook         fclose(logfile);
1615e735b91cSpbrook         logfile = NULL;
161634865134Sbellard     }
161734865134Sbellard }
161834865134Sbellard 
161934865134Sbellard void cpu_set_log_filename(const char *filename)
162034865134Sbellard {
162134865134Sbellard     logfilename = strdup(filename);
1622e735b91cSpbrook     if (logfile) {
1623e735b91cSpbrook         fclose(logfile);
1624e735b91cSpbrook         logfile = NULL;
1625e735b91cSpbrook     }
1626e735b91cSpbrook     cpu_set_log(loglevel);
162734865134Sbellard }
1628c33a346eSbellard 
16293098dba0Saurel32 static void cpu_unlink_tb(CPUState *env)
1630ea041c0eSbellard {
1631d5975363Spbrook     /* FIXME: TB unchaining isn't SMP safe.  For now just ignore the
1632d5975363Spbrook        problem and hope the cpu will stop of its own accord.  For userspace
1633d5975363Spbrook        emulation this often isn't actually as bad as it sounds.  Often
1634d5975363Spbrook        signals are used primarily to interrupt blocking syscalls.  */
16353098dba0Saurel32     TranslationBlock *tb;
1636c227f099SAnthony Liguori     static spinlock_t interrupt_lock = SPIN_LOCK_UNLOCKED;
16373098dba0Saurel32 
1638cab1b4bdSRiku Voipio     spin_lock(&interrupt_lock);
16393098dba0Saurel32     tb = env->current_tb;
16403098dba0Saurel32     /* if the cpu is currently executing code, we must unlink it and
16413098dba0Saurel32        all the potentially executing TB */
1642f76cfe56SRiku Voipio     if (tb) {
16433098dba0Saurel32         env->current_tb = NULL;
16443098dba0Saurel32         tb_reset_jump_recursive(tb);
16453098dba0Saurel32     }
1646cab1b4bdSRiku Voipio     spin_unlock(&interrupt_lock);
16473098dba0Saurel32 }
16483098dba0Saurel32 
164997ffbd8dSJan Kiszka #ifndef CONFIG_USER_ONLY
16503098dba0Saurel32 /* mask must never be zero, except for A20 change call */
1651ec6959d0SJan Kiszka static void tcg_handle_interrupt(CPUState *env, int mask)
16523098dba0Saurel32 {
16533098dba0Saurel32     int old_mask;
16543098dba0Saurel32 
16553098dba0Saurel32     old_mask = env->interrupt_request;
16563098dba0Saurel32     env->interrupt_request |= mask;
16573098dba0Saurel32 
16588edac960Saliguori     /*
16598edac960Saliguori      * If called from iothread context, wake the target cpu in
16608edac960Saliguori      * case its halted.
16618edac960Saliguori      * case it's halted.
1662b7680cb6SJan Kiszka     if (!qemu_cpu_is_self(env)) {
16638edac960Saliguori         qemu_cpu_kick(env);
16648edac960Saliguori         return;
16658edac960Saliguori     }
16668edac960Saliguori 
16672e70f6efSpbrook     if (use_icount) {
1668266910c4Spbrook         env->icount_decr.u16.high = 0xffff;
16692e70f6efSpbrook         if (!can_do_io(env)
1670be214e6cSaurel32             && (mask & ~old_mask) != 0) {
16712e70f6efSpbrook             cpu_abort(env, "Raised interrupt while not in I/O function");
16722e70f6efSpbrook         }
16732e70f6efSpbrook     } else {
16743098dba0Saurel32         cpu_unlink_tb(env);
1675ea041c0eSbellard     }
16762e70f6efSpbrook }
1677ea041c0eSbellard 
1678ec6959d0SJan Kiszka CPUInterruptHandler cpu_interrupt_handler = tcg_handle_interrupt;
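
/* The indirection above lets an accelerator replace the TCG behaviour:
   cpu_interrupt() dispatches through cpu_interrupt_handler instead of
   calling tcg_handle_interrupt() directly (assumption, based on the
   CPUInterruptHandler hook; e.g. KVM can install its own handler). */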
1679ec6959d0SJan Kiszka 
168097ffbd8dSJan Kiszka #else /* CONFIG_USER_ONLY */
168197ffbd8dSJan Kiszka 
168297ffbd8dSJan Kiszka void cpu_interrupt(CPUState *env, int mask)
168397ffbd8dSJan Kiszka {
168497ffbd8dSJan Kiszka     env->interrupt_request |= mask;
168597ffbd8dSJan Kiszka     cpu_unlink_tb(env);
168697ffbd8dSJan Kiszka }
168797ffbd8dSJan Kiszka #endif /* CONFIG_USER_ONLY */
168897ffbd8dSJan Kiszka 
1689b54ad049Sbellard void cpu_reset_interrupt(CPUState *env, int mask)
1690b54ad049Sbellard {
1691b54ad049Sbellard     env->interrupt_request &= ~mask;
1692b54ad049Sbellard }
1693b54ad049Sbellard 
16943098dba0Saurel32 void cpu_exit(CPUState *env)
16953098dba0Saurel32 {
16963098dba0Saurel32     env->exit_request = 1;
16973098dba0Saurel32     cpu_unlink_tb(env);
16983098dba0Saurel32 }
16993098dba0Saurel32 
1700c7cd6a37Sblueswir1 const CPULogItem cpu_log_items[] = {
1701f193c797Sbellard     { CPU_LOG_TB_OUT_ASM, "out_asm",
1702f193c797Sbellard       "show generated host assembly code for each compiled TB" },
1703f193c797Sbellard     { CPU_LOG_TB_IN_ASM, "in_asm",
1704f193c797Sbellard       "show target assembly code for each compiled TB" },
1705f193c797Sbellard     { CPU_LOG_TB_OP, "op",
170657fec1feSbellard       "show micro ops for each compiled TB" },
1707f193c797Sbellard     { CPU_LOG_TB_OP_OPT, "op_opt",
1708e01a1157Sblueswir1       "show micro ops "
1709e01a1157Sblueswir1 #ifdef TARGET_I386
1710e01a1157Sblueswir1       "before eflags optimization and "
1711f193c797Sbellard #endif
1712e01a1157Sblueswir1       "after liveness analysis" },
1713f193c797Sbellard     { CPU_LOG_INT, "int",
1714f193c797Sbellard       "show interrupts/exceptions in short format" },
1715f193c797Sbellard     { CPU_LOG_EXEC, "exec",
1716f193c797Sbellard       "show trace before each executed TB (lots of logs)" },
17179fddaa0cSbellard     { CPU_LOG_TB_CPU, "cpu",
1718e91c8a77Sths       "show CPU state before block translation" },
1719f193c797Sbellard #ifdef TARGET_I386
1720f193c797Sbellard     { CPU_LOG_PCALL, "pcall",
1721f193c797Sbellard       "show protected mode far calls/returns/exceptions" },
1722eca1bdf4Saliguori     { CPU_LOG_RESET, "cpu_reset",
1723eca1bdf4Saliguori       "show CPU state before CPU resets" },
1724f193c797Sbellard #endif
17258e3a9fd2Sbellard #ifdef DEBUG_IOPORT
1726fd872598Sbellard     { CPU_LOG_IOPORT, "ioport",
1727fd872598Sbellard       "show all i/o ports accesses" },
17288e3a9fd2Sbellard #endif
1729f193c797Sbellard     { 0, NULL, NULL },
1730f193c797Sbellard };
1731f193c797Sbellard 
1732f6f3fbcaSMichael S. Tsirkin #ifndef CONFIG_USER_ONLY
1733f6f3fbcaSMichael S. Tsirkin static QLIST_HEAD(memory_client_list, CPUPhysMemoryClient) memory_client_list
1734f6f3fbcaSMichael S. Tsirkin     = QLIST_HEAD_INITIALIZER(memory_client_list);
1735f6f3fbcaSMichael S. Tsirkin 
1736f6f3fbcaSMichael S. Tsirkin static void cpu_notify_set_memory(target_phys_addr_t start_addr,
1737f6f3fbcaSMichael S. Tsirkin                                   ram_addr_t size,
17380fd542fbSMichael S. Tsirkin                                   ram_addr_t phys_offset,
17390fd542fbSMichael S. Tsirkin                                   bool log_dirty)
1740f6f3fbcaSMichael S. Tsirkin {
1741f6f3fbcaSMichael S. Tsirkin     CPUPhysMemoryClient *client;
1742f6f3fbcaSMichael S. Tsirkin     QLIST_FOREACH(client, &memory_client_list, list) {
17430fd542fbSMichael S. Tsirkin         client->set_memory(client, start_addr, size, phys_offset, log_dirty);
1744f6f3fbcaSMichael S. Tsirkin     }
1745f6f3fbcaSMichael S. Tsirkin }
1746f6f3fbcaSMichael S. Tsirkin 
1747f6f3fbcaSMichael S. Tsirkin static int cpu_notify_sync_dirty_bitmap(target_phys_addr_t start,
1748f6f3fbcaSMichael S. Tsirkin                                         target_phys_addr_t end)
1749f6f3fbcaSMichael S. Tsirkin {
1750f6f3fbcaSMichael S. Tsirkin     CPUPhysMemoryClient *client;
1751f6f3fbcaSMichael S. Tsirkin     QLIST_FOREACH(client, &memory_client_list, list) {
1752f6f3fbcaSMichael S. Tsirkin         int r = client->sync_dirty_bitmap(client, start, end);
1753f6f3fbcaSMichael S. Tsirkin         if (r < 0)
1754f6f3fbcaSMichael S. Tsirkin             return r;
1755f6f3fbcaSMichael S. Tsirkin     }
1756f6f3fbcaSMichael S. Tsirkin     return 0;
1757f6f3fbcaSMichael S. Tsirkin }
1758f6f3fbcaSMichael S. Tsirkin 
1759f6f3fbcaSMichael S. Tsirkin static int cpu_notify_migration_log(int enable)
1760f6f3fbcaSMichael S. Tsirkin {
1761f6f3fbcaSMichael S. Tsirkin     CPUPhysMemoryClient *client;
1762f6f3fbcaSMichael S. Tsirkin     QLIST_FOREACH(client, &memory_client_list, list) {
1763f6f3fbcaSMichael S. Tsirkin         int r = client->migration_log(client, enable);
1764f6f3fbcaSMichael S. Tsirkin         if (r < 0)
1765f6f3fbcaSMichael S. Tsirkin             return r;
1766f6f3fbcaSMichael S. Tsirkin     }
1767f6f3fbcaSMichael S. Tsirkin     return 0;
1768f6f3fbcaSMichael S. Tsirkin }
1769f6f3fbcaSMichael S. Tsirkin 
17702173a75fSAlex Williamson struct last_map {
17712173a75fSAlex Williamson     target_phys_addr_t start_addr;
17722173a75fSAlex Williamson     ram_addr_t size;
17732173a75fSAlex Williamson     ram_addr_t phys_offset;
17742173a75fSAlex Williamson };
17752173a75fSAlex Williamson 
17768d4c78e7SAlex Williamson /* The l1_phys_map provides the upper P_L1_BITs of the guest physical
17778d4c78e7SAlex Williamson  * address.  Each intermediate table provides the next L2_BITs of guest
17788d4c78e7SAlex Williamson  * physical address space.  The number of levels vary based on host and
17798d4c78e7SAlex Williamson  * guest configuration, making it efficient to build the final guest
17808d4c78e7SAlex Williamson  * physical address by seeding the L1 offset and shifting and adding in
17818d4c78e7SAlex Williamson  * each L2 offset as we recurse through them. */
17822173a75fSAlex Williamson static void phys_page_for_each_1(CPUPhysMemoryClient *client, int level,
17832173a75fSAlex Williamson                                  void **lp, target_phys_addr_t addr,
17842173a75fSAlex Williamson                                  struct last_map *map)
1785f6f3fbcaSMichael S. Tsirkin {
17865cd2c5b6SRichard Henderson     int i;
1787f6f3fbcaSMichael S. Tsirkin 
17885cd2c5b6SRichard Henderson     if (*lp == NULL) {
17895cd2c5b6SRichard Henderson         return;
1790f6f3fbcaSMichael S. Tsirkin     }
17915cd2c5b6SRichard Henderson     if (level == 0) {
17925cd2c5b6SRichard Henderson         PhysPageDesc *pd = *lp;
17938d4c78e7SAlex Williamson         addr <<= L2_BITS + TARGET_PAGE_BITS;
17947296abacSPaul Brook         for (i = 0; i < L2_SIZE; ++i) {
17955cd2c5b6SRichard Henderson             if (pd[i].phys_offset != IO_MEM_UNASSIGNED) {
17962173a75fSAlex Williamson                 target_phys_addr_t start_addr = addr | i << TARGET_PAGE_BITS;
17972173a75fSAlex Williamson 
17982173a75fSAlex Williamson                 if (map->size &&
17992173a75fSAlex Williamson                     start_addr == map->start_addr + map->size &&
18002173a75fSAlex Williamson                     pd[i].phys_offset == map->phys_offset + map->size) {
18012173a75fSAlex Williamson 
18022173a75fSAlex Williamson                     map->size += TARGET_PAGE_SIZE;
18032173a75fSAlex Williamson                     continue;
18042173a75fSAlex Williamson                 } else if (map->size) {
18052173a75fSAlex Williamson                     client->set_memory(client, map->start_addr,
18062173a75fSAlex Williamson                                        map->size, map->phys_offset, false);
18072173a75fSAlex Williamson                 }
18082173a75fSAlex Williamson 
18092173a75fSAlex Williamson                 map->start_addr = start_addr;
18102173a75fSAlex Williamson                 map->size = TARGET_PAGE_SIZE;
18112173a75fSAlex Williamson                 map->phys_offset = pd[i].phys_offset;
1812f6f3fbcaSMichael S. Tsirkin             }
18135cd2c5b6SRichard Henderson         }
18145cd2c5b6SRichard Henderson     } else {
18155cd2c5b6SRichard Henderson         void **pp = *lp;
18167296abacSPaul Brook         for (i = 0; i < L2_SIZE; ++i) {
18178d4c78e7SAlex Williamson             phys_page_for_each_1(client, level - 1, pp + i,
18182173a75fSAlex Williamson                                  (addr << L2_BITS) | i, map);
1819f6f3fbcaSMichael S. Tsirkin         }
1820f6f3fbcaSMichael S. Tsirkin     }
1821f6f3fbcaSMichael S. Tsirkin }
1822f6f3fbcaSMichael S. Tsirkin 
1823f6f3fbcaSMichael S. Tsirkin static void phys_page_for_each(CPUPhysMemoryClient *client)
1824f6f3fbcaSMichael S. Tsirkin {
18255cd2c5b6SRichard Henderson     int i;
18262173a75fSAlex Williamson     struct last_map map = { };
18272173a75fSAlex Williamson 
18285cd2c5b6SRichard Henderson     for (i = 0; i < P_L1_SIZE; ++i) {
18295cd2c5b6SRichard Henderson         phys_page_for_each_1(client, P_L1_SHIFT / L2_BITS - 1,
18302173a75fSAlex Williamson                              l1_phys_map + i, i, &map);
18312173a75fSAlex Williamson     }
18322173a75fSAlex Williamson     if (map.size) {
18332173a75fSAlex Williamson         client->set_memory(client, map.start_addr, map.size, map.phys_offset,
18342173a75fSAlex Williamson                            false);
1835f6f3fbcaSMichael S. Tsirkin     }
1836f6f3fbcaSMichael S. Tsirkin }
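
/* Note: the 'map' accumulator above coalesces runs of pages whose
   guest-physical addresses and phys_offsets are both contiguous into a
   single set_memory() callback, so a client registering against a flat
   128 MB RAM block sees one call instead of 32768 page-sized ones
   (assuming 4 KB target pages). */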
1837f6f3fbcaSMichael S. Tsirkin 
1838f6f3fbcaSMichael S. Tsirkin void cpu_register_phys_memory_client(CPUPhysMemoryClient *client)
1839f6f3fbcaSMichael S. Tsirkin {
1840f6f3fbcaSMichael S. Tsirkin     QLIST_INSERT_HEAD(&memory_client_list, client, list);
1841f6f3fbcaSMichael S. Tsirkin     phys_page_for_each(client);
1842f6f3fbcaSMichael S. Tsirkin }
1843f6f3fbcaSMichael S. Tsirkin 
1844f6f3fbcaSMichael S. Tsirkin void cpu_unregister_phys_memory_client(CPUPhysMemoryClient *client)
1845f6f3fbcaSMichael S. Tsirkin {
1846f6f3fbcaSMichael S. Tsirkin     QLIST_REMOVE(client, list);
1847f6f3fbcaSMichael S. Tsirkin }
1848f6f3fbcaSMichael S. Tsirkin #endif
1849f6f3fbcaSMichael S. Tsirkin 
1850f193c797Sbellard static int cmp1(const char *s1, int n, const char *s2)
1851f193c797Sbellard {
1852f193c797Sbellard     if (strlen(s2) != n)
1853f193c797Sbellard         return 0;
1854f193c797Sbellard     return memcmp(s1, s2, n) == 0;
1855f193c797Sbellard }
1856f193c797Sbellard 
1857f193c797Sbellard /* takes a comma-separated list of log masks. Returns 0 on error. */
1858f193c797Sbellard int cpu_str_to_log_mask(const char *str)
1859f193c797Sbellard {
1860c7cd6a37Sblueswir1     const CPULogItem *item;
1861f193c797Sbellard     int mask;
1862f193c797Sbellard     const char *p, *p1;
1863f193c797Sbellard 
1864f193c797Sbellard     p = str;
1865f193c797Sbellard     mask = 0;
1866f193c797Sbellard     for(;;) {
1867f193c797Sbellard         p1 = strchr(p, ',');
1868f193c797Sbellard         if (!p1)
1869f193c797Sbellard             p1 = p + strlen(p);
18708e3a9fd2Sbellard         if (cmp1(p, p1 - p, "all")) {
18718e3a9fd2Sbellard             for(item = cpu_log_items; item->mask != 0; item++) {
18728e3a9fd2Sbellard                 mask |= item->mask;
18738e3a9fd2Sbellard             }
18748e3a9fd2Sbellard         } else {
1875f193c797Sbellard             for(item = cpu_log_items; item->mask != 0; item++) {
1876f193c797Sbellard                 if (cmp1(p, p1 - p, item->name))
1877f193c797Sbellard                     goto found;
1878f193c797Sbellard             }
1879f193c797Sbellard             return 0;
18808e3a9fd2Sbellard         }
1881f193c797Sbellard     found:
1882f193c797Sbellard         mask |= item->mask;
1883f193c797Sbellard         if (*p1 != ',')
1884f193c797Sbellard             break;
1885f193c797Sbellard         p = p1 + 1;
1886f193c797Sbellard     }
1887f193c797Sbellard     return mask;
1888f193c797Sbellard }
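
/* Usage sketch (illustration only): how a -d style option string is
   typically turned into log flags and applied. */
#if 0
static void example_set_log(const char *arg)
{
    int mask = cpu_str_to_log_mask(arg);   /* e.g. "in_asm,op" */

    if (mask == 0) {
        fprintf(stderr, "unknown log item in '%s'\n", arg);
    } else {
        cpu_set_log(mask);
    }
}
#endif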
1889ea041c0eSbellard 
18907501267eSbellard void cpu_abort(CPUState *env, const char *fmt, ...)
18917501267eSbellard {
18927501267eSbellard     va_list ap;
1893493ae1f0Spbrook     va_list ap2;
18947501267eSbellard 
18957501267eSbellard     va_start(ap, fmt);
1896493ae1f0Spbrook     va_copy(ap2, ap);
18977501267eSbellard     fprintf(stderr, "qemu: fatal: ");
18987501267eSbellard     vfprintf(stderr, fmt, ap);
18997501267eSbellard     fprintf(stderr, "\n");
19007501267eSbellard #ifdef TARGET_I386
19017fe48483Sbellard     cpu_dump_state(env, stderr, fprintf, X86_DUMP_FPU | X86_DUMP_CCOP);
19027fe48483Sbellard #else
19037fe48483Sbellard     cpu_dump_state(env, stderr, fprintf, 0);
19047501267eSbellard #endif
190593fcfe39Saliguori     if (qemu_log_enabled()) {
190693fcfe39Saliguori         qemu_log("qemu: fatal: ");
190793fcfe39Saliguori         qemu_log_vprintf(fmt, ap2);
190893fcfe39Saliguori         qemu_log("\n");
1909f9373291Sj_mayer #ifdef TARGET_I386
191093fcfe39Saliguori         log_cpu_state(env, X86_DUMP_FPU | X86_DUMP_CCOP);
1911f9373291Sj_mayer #else
191293fcfe39Saliguori         log_cpu_state(env, 0);
1913f9373291Sj_mayer #endif
191431b1a7b4Saliguori         qemu_log_flush();
191593fcfe39Saliguori         qemu_log_close();
1916924edcaeSbalrog     }
1917493ae1f0Spbrook     va_end(ap2);
1918f9373291Sj_mayer     va_end(ap);
1919fd052bf6SRiku Voipio #if defined(CONFIG_USER_ONLY)
1920fd052bf6SRiku Voipio     {
1921fd052bf6SRiku Voipio         struct sigaction act;
1922fd052bf6SRiku Voipio         sigfillset(&act.sa_mask);
1923fd052bf6SRiku Voipio         act.sa_handler = SIG_DFL;
1924fd052bf6SRiku Voipio         sigaction(SIGABRT, &act, NULL);
1925fd052bf6SRiku Voipio     }
1926fd052bf6SRiku Voipio #endif
19277501267eSbellard     abort();
19287501267eSbellard }
19297501267eSbellard 
1930c5be9f08Sths CPUState *cpu_copy(CPUState *env)
1931c5be9f08Sths {
193201ba9816Sths     CPUState *new_env = cpu_init(env->cpu_model_str);
1933c5be9f08Sths     CPUState *next_cpu = new_env->next_cpu;
1934c5be9f08Sths     int cpu_index = new_env->cpu_index;
19355a38f081Saliguori #if defined(TARGET_HAS_ICE)
19365a38f081Saliguori     CPUBreakpoint *bp;
19375a38f081Saliguori     CPUWatchpoint *wp;
19385a38f081Saliguori #endif
19395a38f081Saliguori 
1940c5be9f08Sths     memcpy(new_env, env, sizeof(CPUState));
19415a38f081Saliguori 
19425a38f081Saliguori     /* Preserve chaining and index. */
1943c5be9f08Sths     new_env->next_cpu = next_cpu;
1944c5be9f08Sths     new_env->cpu_index = cpu_index;
19455a38f081Saliguori 
19465a38f081Saliguori     /* Clone all break/watchpoints.
19475a38f081Saliguori        Note: Once we support ptrace with hw-debug register access, make sure
19485a38f081Saliguori        BP_CPU break/watchpoints are handled correctly on clone. */
194972cf2d4fSBlue Swirl     QTAILQ_INIT(&new_env->breakpoints);
195072cf2d4fSBlue Swirl     QTAILQ_INIT(&new_env->watchpoints);
19515a38f081Saliguori #if defined(TARGET_HAS_ICE)
195272cf2d4fSBlue Swirl     QTAILQ_FOREACH(bp, &env->breakpoints, entry) {
19535a38f081Saliguori         cpu_breakpoint_insert(new_env, bp->pc, bp->flags, NULL);
19545a38f081Saliguori     }
195572cf2d4fSBlue Swirl     QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
19565a38f081Saliguori         cpu_watchpoint_insert(new_env, wp->vaddr, (~wp->len_mask) + 1,
19575a38f081Saliguori                               wp->flags, NULL);
19585a38f081Saliguori     }
19595a38f081Saliguori #endif
19605a38f081Saliguori 
1961c5be9f08Sths     return new_env;
1962c5be9f08Sths }
1963c5be9f08Sths 
19640124311eSbellard #if !defined(CONFIG_USER_ONLY)
19650124311eSbellard 
19665c751e99Sedgar_igl static inline void tlb_flush_jmp_cache(CPUState *env, target_ulong addr)
19675c751e99Sedgar_igl {
19685c751e99Sedgar_igl     unsigned int i;
19695c751e99Sedgar_igl 
19705c751e99Sedgar_igl     /* Discard jump cache entries for any tb that might overlap
19715c751e99Sedgar_igl        the flushed page.  */
19725c751e99Sedgar_igl     i = tb_jmp_cache_hash_page(addr - TARGET_PAGE_SIZE);
19735c751e99Sedgar_igl     memset (&env->tb_jmp_cache[i], 0,
19745c751e99Sedgar_igl             TB_JMP_PAGE_SIZE * sizeof(TranslationBlock *));
19755c751e99Sedgar_igl 
19765c751e99Sedgar_igl     i = tb_jmp_cache_hash_page(addr);
19775c751e99Sedgar_igl     memset (&env->tb_jmp_cache[i], 0,
19785c751e99Sedgar_igl             TB_JMP_PAGE_SIZE * sizeof(TranslationBlock *));
19795c751e99Sedgar_igl }
19805c751e99Sedgar_igl 
198108738984SIgor Kovalenko static CPUTLBEntry s_cputlb_empty_entry = {
198208738984SIgor Kovalenko     .addr_read  = -1,
198308738984SIgor Kovalenko     .addr_write = -1,
198408738984SIgor Kovalenko     .addr_code  = -1,
198508738984SIgor Kovalenko     .addend     = -1,
198608738984SIgor Kovalenko };
198708738984SIgor Kovalenko 
1988ee8b7021Sbellard /* NOTE: if flush_global is true, also flush global entries (not
1989ee8b7021Sbellard    implemented yet) */
1990ee8b7021Sbellard void tlb_flush(CPUState *env, int flush_global)
199133417e70Sbellard {
199233417e70Sbellard     int i;
19930124311eSbellard 
19949fa3e853Sbellard #if defined(DEBUG_TLB)
19959fa3e853Sbellard     printf("tlb_flush:\n");
19969fa3e853Sbellard #endif
19970124311eSbellard     /* must reset current TB so that interrupts cannot modify the
19980124311eSbellard        links while we are modifying them */
19990124311eSbellard     env->current_tb = NULL;
20000124311eSbellard 
200133417e70Sbellard     for(i = 0; i < CPU_TLB_SIZE; i++) {
2002cfde4bd9SIsaku Yamahata         int mmu_idx;
2003cfde4bd9SIsaku Yamahata         for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) {
200408738984SIgor Kovalenko             env->tlb_table[mmu_idx][i] = s_cputlb_empty_entry;
2005cfde4bd9SIsaku Yamahata         }
200633417e70Sbellard     }
20079fa3e853Sbellard 
20088a40a180Sbellard     memset (env->tb_jmp_cache, 0, TB_JMP_CACHE_SIZE * sizeof (void *));
20099fa3e853Sbellard 
2010d4c430a8SPaul Brook     env->tlb_flush_addr = -1;
2011d4c430a8SPaul Brook     env->tlb_flush_mask = 0;
2012e3db7226Sbellard     tlb_flush_count++;
201333417e70Sbellard }
201433417e70Sbellard 
2015274da6b2Sbellard static inline void tlb_flush_entry(CPUTLBEntry *tlb_entry, target_ulong addr)
201661382a50Sbellard {
201784b7b8e7Sbellard     if (addr == (tlb_entry->addr_read &
201884b7b8e7Sbellard                  (TARGET_PAGE_MASK | TLB_INVALID_MASK)) ||
201984b7b8e7Sbellard         addr == (tlb_entry->addr_write &
202084b7b8e7Sbellard                  (TARGET_PAGE_MASK | TLB_INVALID_MASK)) ||
202184b7b8e7Sbellard         addr == (tlb_entry->addr_code &
202284b7b8e7Sbellard                  (TARGET_PAGE_MASK | TLB_INVALID_MASK))) {
202308738984SIgor Kovalenko         *tlb_entry = s_cputlb_empty_entry;
202484b7b8e7Sbellard     }
202561382a50Sbellard }
202661382a50Sbellard 
20272e12669aSbellard void tlb_flush_page(CPUState *env, target_ulong addr)
202833417e70Sbellard {
20298a40a180Sbellard     int i;
2030cfde4bd9SIsaku Yamahata     int mmu_idx;
20310124311eSbellard 
20329fa3e853Sbellard #if defined(DEBUG_TLB)
2033108c49b8Sbellard     printf("tlb_flush_page: " TARGET_FMT_lx "\n", addr);
20349fa3e853Sbellard #endif
2035d4c430a8SPaul Brook     /* Check if we need to flush due to large pages.  */
2036d4c430a8SPaul Brook     if ((addr & env->tlb_flush_mask) == env->tlb_flush_addr) {
2037d4c430a8SPaul Brook #if defined(DEBUG_TLB)
2038d4c430a8SPaul Brook         printf("tlb_flush_page: forced full flush ("
2039d4c430a8SPaul Brook                TARGET_FMT_lx "/" TARGET_FMT_lx ")\n",
2040d4c430a8SPaul Brook                env->tlb_flush_addr, env->tlb_flush_mask);
2041d4c430a8SPaul Brook #endif
2042d4c430a8SPaul Brook         tlb_flush(env, 1);
2043d4c430a8SPaul Brook         return;
2044d4c430a8SPaul Brook     }
20450124311eSbellard     /* must reset current TB so that interrupts cannot modify the
20460124311eSbellard        links while we are modifying them */
20470124311eSbellard     env->current_tb = NULL;
204833417e70Sbellard 
204961382a50Sbellard     addr &= TARGET_PAGE_MASK;
205033417e70Sbellard     i = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
2051cfde4bd9SIsaku Yamahata     for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++)
2052cfde4bd9SIsaku Yamahata         tlb_flush_entry(&env->tlb_table[mmu_idx][i], addr);
20530124311eSbellard 
20545c751e99Sedgar_igl     tlb_flush_jmp_cache(env, addr);
20559fa3e853Sbellard }
20569fa3e853Sbellard 
20579fa3e853Sbellard /* update the TLBs so that writes to code in the virtual page 'addr'
20589fa3e853Sbellard    can be detected */
2059c227f099SAnthony Liguori static void tlb_protect_code(ram_addr_t ram_addr)
206061382a50Sbellard {
20616a00d601Sbellard     cpu_physical_memory_reset_dirty(ram_addr,
20626a00d601Sbellard                                     ram_addr + TARGET_PAGE_SIZE,
20636a00d601Sbellard                                     CODE_DIRTY_FLAG);
20649fa3e853Sbellard }
20659fa3e853Sbellard 
20669fa3e853Sbellard /* update the TLB so that writes in physical page 'phys_addr' are no longer
20673a7d929eSbellard    tested for self-modifying code */
2068c227f099SAnthony Liguori static void tlb_unprotect_code_phys(CPUState *env, ram_addr_t ram_addr,
20693a7d929eSbellard                                     target_ulong vaddr)
20709fa3e853Sbellard {
2071f7c11b53SYoshiaki Tamura     cpu_physical_memory_set_dirty_flags(ram_addr, CODE_DIRTY_FLAG);
20729fa3e853Sbellard }
20739fa3e853Sbellard 
20741ccde1cbSbellard static inline void tlb_reset_dirty_range(CPUTLBEntry *tlb_entry,
20751ccde1cbSbellard                                          unsigned long start, unsigned long length)
20761ccde1cbSbellard {
20771ccde1cbSbellard     unsigned long addr;
207884b7b8e7Sbellard     if ((tlb_entry->addr_write & ~TARGET_PAGE_MASK) == IO_MEM_RAM) {
207984b7b8e7Sbellard         addr = (tlb_entry->addr_write & TARGET_PAGE_MASK) + tlb_entry->addend;
20801ccde1cbSbellard         if ((addr - start) < length) {
20810f459d16Spbrook             tlb_entry->addr_write = (tlb_entry->addr_write & TARGET_PAGE_MASK) | TLB_NOTDIRTY;
20821ccde1cbSbellard         }
20831ccde1cbSbellard     }
20841ccde1cbSbellard }
20851ccde1cbSbellard 
20865579c7f3Spbrook /* Note: start and end must be within the same ram block.  */
2087c227f099SAnthony Liguori void cpu_physical_memory_reset_dirty(ram_addr_t start, ram_addr_t end,
20880a962c02Sbellard                                      int dirty_flags)
20891ccde1cbSbellard {
20901ccde1cbSbellard     CPUState *env;
20914f2ac237Sbellard     unsigned long length, start1;
2092f7c11b53SYoshiaki Tamura     int i;
20931ccde1cbSbellard 
20941ccde1cbSbellard     start &= TARGET_PAGE_MASK;
20951ccde1cbSbellard     end = TARGET_PAGE_ALIGN(end);
20961ccde1cbSbellard 
20971ccde1cbSbellard     length = end - start;
20981ccde1cbSbellard     if (length == 0)
20991ccde1cbSbellard         return;
2100f7c11b53SYoshiaki Tamura     cpu_physical_memory_mask_dirty_range(start, length, dirty_flags);
2101f23db169Sbellard 
21021ccde1cbSbellard     /* we modify the TLB cache so that the dirty bit will be set again
21031ccde1cbSbellard        when accessing the range */
2104b2e0a138SMichael S. Tsirkin     start1 = (unsigned long)qemu_safe_ram_ptr(start);
2105a57d23e4SStefan Weil     /* Check that we don't span multiple blocks; spanning would break
21065579c7f3Spbrook        the address comparisons below.  */
2107b2e0a138SMichael S. Tsirkin     if ((unsigned long)qemu_safe_ram_ptr(end - 1) - start1
21085579c7f3Spbrook             != (end - 1) - start) {
21095579c7f3Spbrook         abort();
21105579c7f3Spbrook     }
21115579c7f3Spbrook 
21126a00d601Sbellard     for(env = first_cpu; env != NULL; env = env->next_cpu) {
2113cfde4bd9SIsaku Yamahata         int mmu_idx;
2114cfde4bd9SIsaku Yamahata         for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) {
21151ccde1cbSbellard             for(i = 0; i < CPU_TLB_SIZE; i++)
2116cfde4bd9SIsaku Yamahata                 tlb_reset_dirty_range(&env->tlb_table[mmu_idx][i],
2117cfde4bd9SIsaku Yamahata                                       start1, length);
2118cfde4bd9SIsaku Yamahata         }
21196a00d601Sbellard     }
21201ccde1cbSbellard }
21211ccde1cbSbellard 
212274576198Saliguori int cpu_physical_memory_set_dirty_tracking(int enable)
212374576198Saliguori {
2124f6f3fbcaSMichael S. Tsirkin     int ret = 0;
212574576198Saliguori     in_migration = enable;
2126f6f3fbcaSMichael S. Tsirkin     ret = cpu_notify_migration_log(!!enable);
2127f6f3fbcaSMichael S. Tsirkin     return ret;
212874576198Saliguori }
212974576198Saliguori 
213074576198Saliguori int cpu_physical_memory_get_dirty_tracking(void)
213174576198Saliguori {
213274576198Saliguori     return in_migration;
213374576198Saliguori }
213474576198Saliguori 
2135c227f099SAnthony Liguori int cpu_physical_sync_dirty_bitmap(target_phys_addr_t start_addr,
2136c227f099SAnthony Liguori                                    target_phys_addr_t end_addr)
21372bec46dcSaliguori {
21387b8f3b78SMichael S. Tsirkin     int ret;
2139151f7749SJan Kiszka 
2140f6f3fbcaSMichael S. Tsirkin     ret = cpu_notify_sync_dirty_bitmap(start_addr, end_addr);
2141151f7749SJan Kiszka     return ret;
21422bec46dcSaliguori }
21432bec46dcSaliguori 
2144e5896b12SAnthony PERARD int cpu_physical_log_start(target_phys_addr_t start_addr,
2145e5896b12SAnthony PERARD                            ram_addr_t size)
2146e5896b12SAnthony PERARD {
2147e5896b12SAnthony PERARD     CPUPhysMemoryClient *client;
2148e5896b12SAnthony PERARD     QLIST_FOREACH(client, &memory_client_list, list) {
2149e5896b12SAnthony PERARD         if (client->log_start) {
2150e5896b12SAnthony PERARD             int r = client->log_start(client, start_addr, size);
2151e5896b12SAnthony PERARD             if (r < 0) {
2152e5896b12SAnthony PERARD                 return r;
2153e5896b12SAnthony PERARD             }
2154e5896b12SAnthony PERARD         }
2155e5896b12SAnthony PERARD     }
2156e5896b12SAnthony PERARD     return 0;
2157e5896b12SAnthony PERARD }
2158e5896b12SAnthony PERARD 
2159e5896b12SAnthony PERARD int cpu_physical_log_stop(target_phys_addr_t start_addr,
2160e5896b12SAnthony PERARD                           ram_addr_t size)
2161e5896b12SAnthony PERARD {
2162e5896b12SAnthony PERARD     CPUPhysMemoryClient *client;
2163e5896b12SAnthony PERARD     QLIST_FOREACH(client, &memory_client_list, list) {
2164e5896b12SAnthony PERARD         if (client->log_stop) {
2165e5896b12SAnthony PERARD             int r = client->log_stop(client, start_addr, size);
2166e5896b12SAnthony PERARD             if (r < 0) {
2167e5896b12SAnthony PERARD                 return r;
2168e5896b12SAnthony PERARD             }
2169e5896b12SAnthony PERARD         }
2170e5896b12SAnthony PERARD     }
2171e5896b12SAnthony PERARD     return 0;
2172e5896b12SAnthony PERARD }
2173e5896b12SAnthony PERARD 
21743a7d929eSbellard static inline void tlb_update_dirty(CPUTLBEntry *tlb_entry)
21753a7d929eSbellard {
2176c227f099SAnthony Liguori     ram_addr_t ram_addr;
21775579c7f3Spbrook     void *p;
21783a7d929eSbellard 
217984b7b8e7Sbellard     if ((tlb_entry->addr_write & ~TARGET_PAGE_MASK) == IO_MEM_RAM) {
21805579c7f3Spbrook         p = (void *)(unsigned long)((tlb_entry->addr_write & TARGET_PAGE_MASK)
21815579c7f3Spbrook             + tlb_entry->addend);
2182e890261fSMarcelo Tosatti         ram_addr = qemu_ram_addr_from_host_nofail(p);
21833a7d929eSbellard         if (!cpu_physical_memory_is_dirty(ram_addr)) {
21840f459d16Spbrook             tlb_entry->addr_write |= TLB_NOTDIRTY;
21853a7d929eSbellard         }
21863a7d929eSbellard     }
21873a7d929eSbellard }
21883a7d929eSbellard 
21893a7d929eSbellard /* update the TLB according to the current state of the dirty bits */
21903a7d929eSbellard void cpu_tlb_update_dirty(CPUState *env)
21913a7d929eSbellard {
21923a7d929eSbellard     int i;
2193cfde4bd9SIsaku Yamahata     int mmu_idx;
2194cfde4bd9SIsaku Yamahata     for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) {
21953a7d929eSbellard         for(i = 0; i < CPU_TLB_SIZE; i++)
2196cfde4bd9SIsaku Yamahata             tlb_update_dirty(&env->tlb_table[mmu_idx][i]);
2197cfde4bd9SIsaku Yamahata     }
21983a7d929eSbellard }
21993a7d929eSbellard 
22000f459d16Spbrook static inline void tlb_set_dirty1(CPUTLBEntry *tlb_entry, target_ulong vaddr)
22011ccde1cbSbellard {
22020f459d16Spbrook     if (tlb_entry->addr_write == (vaddr | TLB_NOTDIRTY))
22030f459d16Spbrook         tlb_entry->addr_write = vaddr;
22041ccde1cbSbellard }
22051ccde1cbSbellard 
22060f459d16Spbrook /* update the TLB corresponding to virtual page vaddr
22070f459d16Spbrook    so that it is no longer dirty */
22080f459d16Spbrook static inline void tlb_set_dirty(CPUState *env, target_ulong vaddr)
22091ccde1cbSbellard {
22101ccde1cbSbellard     int i;
2211cfde4bd9SIsaku Yamahata     int mmu_idx;
22121ccde1cbSbellard 
22130f459d16Spbrook     vaddr &= TARGET_PAGE_MASK;
22141ccde1cbSbellard     i = (vaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
2215cfde4bd9SIsaku Yamahata     for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++)
2216cfde4bd9SIsaku Yamahata         tlb_set_dirty1(&env->tlb_table[mmu_idx][i], vaddr);
22171ccde1cbSbellard }
22181ccde1cbSbellard 
2219d4c430a8SPaul Brook /* Our TLB does not support large pages, so remember the area covered by
2220d4c430a8SPaul Brook    large pages and trigger a full TLB flush if these are invalidated.  */
2221d4c430a8SPaul Brook static void tlb_add_large_page(CPUState *env, target_ulong vaddr,
2222d4c430a8SPaul Brook                                target_ulong size)
2223d4c430a8SPaul Brook {
2224d4c430a8SPaul Brook     target_ulong mask = ~(size - 1);
2225d4c430a8SPaul Brook 
2226d4c430a8SPaul Brook     if (env->tlb_flush_addr == (target_ulong)-1) {
2227d4c430a8SPaul Brook         env->tlb_flush_addr = vaddr & mask;
2228d4c430a8SPaul Brook         env->tlb_flush_mask = mask;
2229d4c430a8SPaul Brook         return;
2230d4c430a8SPaul Brook     }
2231d4c430a8SPaul Brook     /* Extend the existing region to include the new page.
2232d4c430a8SPaul Brook        This is a compromise between unnecessary flushes and the cost
2233d4c430a8SPaul Brook        of maintaining a full variable size TLB.  */
2234d4c430a8SPaul Brook     mask &= env->tlb_flush_mask;
2235d4c430a8SPaul Brook     while (((env->tlb_flush_addr ^ vaddr) & mask) != 0) {
2236d4c430a8SPaul Brook         mask <<= 1;
2237d4c430a8SPaul Brook     }
2238d4c430a8SPaul Brook     env->tlb_flush_addr &= mask;
2239d4c430a8SPaul Brook     env->tlb_flush_mask = mask;
2240d4c430a8SPaul Brook }
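
/* Illustrative sketch, not part of the original source: replaying the
   mask widening above for two hypothetical 1 MB large pages. */
static inline void example_merge_large_pages(void)
{
    target_ulong addr = 0x100000, vaddr = 0x300000;
    target_ulong mask = ~(target_ulong)(0x100000 - 1);

    /* Same loop as tlb_add_large_page(): widen the mask until both
       pages share the masked prefix. */
    while (((addr ^ vaddr) & mask) != 0) {
        mask <<= 1;
    }
    addr &= mask;
    /* Result: addr == 0 and the mask now covers a single 4 MB region
       holding both pages, so one flush check handles either. */
}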
2241d4c430a8SPaul Brook 
2242d4c430a8SPaul Brook /* Add a new TLB entry. At most one entry for a given virtual address
2243d4c430a8SPaul Brook    is permitted. Only a single TARGET_PAGE_SIZE region is mapped; the
2244d4c430a8SPaul Brook    supplied size is only used by tlb_flush_page.  */
2245d4c430a8SPaul Brook void tlb_set_page(CPUState *env, target_ulong vaddr,
2246c227f099SAnthony Liguori                   target_phys_addr_t paddr, int prot,
2247d4c430a8SPaul Brook                   int mmu_idx, target_ulong size)
22489fa3e853Sbellard {
224992e873b9Sbellard     PhysPageDesc *p;
22504f2ac237Sbellard     unsigned long pd;
22519fa3e853Sbellard     unsigned int index;
22524f2ac237Sbellard     target_ulong address;
22530f459d16Spbrook     target_ulong code_address;
2254355b1943SPaul Brook     unsigned long addend;
225584b7b8e7Sbellard     CPUTLBEntry *te;
2256a1d1bb31Saliguori     CPUWatchpoint *wp;
2257c227f099SAnthony Liguori     target_phys_addr_t iotlb;
22589fa3e853Sbellard 
2259d4c430a8SPaul Brook     assert(size >= TARGET_PAGE_SIZE);
2260d4c430a8SPaul Brook     if (size != TARGET_PAGE_SIZE) {
2261d4c430a8SPaul Brook         tlb_add_large_page(env, vaddr, size);
2262d4c430a8SPaul Brook     }
226392e873b9Sbellard     p = phys_page_find(paddr >> TARGET_PAGE_BITS);
22649fa3e853Sbellard     if (!p) {
22659fa3e853Sbellard         pd = IO_MEM_UNASSIGNED;
22669fa3e853Sbellard     } else {
22679fa3e853Sbellard         pd = p->phys_offset;
22689fa3e853Sbellard     }
22699fa3e853Sbellard #if defined(DEBUG_TLB)
22707fd3f494SStefan Weil     printf("tlb_set_page: vaddr=" TARGET_FMT_lx " paddr=0x" TARGET_FMT_plx
22717fd3f494SStefan Weil            " prot=%x idx=%d pd=0x%08lx\n",
22727fd3f494SStefan Weil            vaddr, paddr, prot, mmu_idx, pd);
22739fa3e853Sbellard #endif
22749fa3e853Sbellard 
22759fa3e853Sbellard     address = vaddr;
22760f459d16Spbrook     if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM && !(pd & IO_MEM_ROMD)) {
22770f459d16Spbrook         /* IO memory case (romd handled later) */
22780f459d16Spbrook         address |= TLB_MMIO;
22790f459d16Spbrook     }
22805579c7f3Spbrook     addend = (unsigned long)qemu_get_ram_ptr(pd & TARGET_PAGE_MASK);
22810f459d16Spbrook     if ((pd & ~TARGET_PAGE_MASK) <= IO_MEM_ROM) {
22820f459d16Spbrook         /* Normal RAM.  */
22830f459d16Spbrook         iotlb = pd & TARGET_PAGE_MASK;
22840f459d16Spbrook         if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_RAM)
22850f459d16Spbrook             iotlb |= IO_MEM_NOTDIRTY;
22860f459d16Spbrook         else
22870f459d16Spbrook             iotlb |= IO_MEM_ROM;
22880f459d16Spbrook     } else {
2289ccbb4d44SStuart Brady         /* IO handlers are currently passed a physical address.
22900f459d16Spbrook            It would be nice to pass an offset from the base address
22910f459d16Spbrook            of that region.  This would avoid having to special case RAM,
22920f459d16Spbrook            and avoid full address decoding in every device.
22930f459d16Spbrook            We can't use the high bits of pd for this because
22940f459d16Spbrook            IO_MEM_ROMD uses these as a ram address.  */
22958da3ff18Spbrook         iotlb = (pd & ~TARGET_PAGE_MASK);
22968da3ff18Spbrook         if (p) {
22978da3ff18Spbrook             iotlb += p->region_offset;
22988da3ff18Spbrook         } else {
22998da3ff18Spbrook             iotlb += paddr;
23008da3ff18Spbrook         }
23019fa3e853Sbellard     }
23029fa3e853Sbellard 
23030f459d16Spbrook     code_address = address;
23046658ffb8Spbrook     /* Make accesses to pages with watchpoints go via the
23056658ffb8Spbrook        watchpoint trap routines.  */
230672cf2d4fSBlue Swirl     QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
2307a1d1bb31Saliguori         if (vaddr == (wp->vaddr & TARGET_PAGE_MASK)) {
2308bf298f83SJun Koi             /* Avoid trapping reads of pages with a write breakpoint. */
2309bf298f83SJun Koi             if ((prot & PAGE_WRITE) || (wp->flags & BP_MEM_READ)) {
23100f459d16Spbrook                 iotlb = io_mem_watch + paddr;
23110f459d16Spbrook                 address |= TLB_MMIO;
2312bf298f83SJun Koi                 break;
2313bf298f83SJun Koi             }
23146658ffb8Spbrook         }
23156658ffb8Spbrook     }
23166658ffb8Spbrook 
231790f18422Sbellard     index = (vaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
23180f459d16Spbrook     env->iotlb[mmu_idx][index] = iotlb - vaddr;
23196ebbf390Sj_mayer     te = &env->tlb_table[mmu_idx][index];
23200f459d16Spbrook     te->addend = addend - vaddr;
232167b915a5Sbellard     if (prot & PAGE_READ) {
232284b7b8e7Sbellard         te->addr_read = address;
23239fa3e853Sbellard     } else {
232484b7b8e7Sbellard         te->addr_read = -1;
232584b7b8e7Sbellard     }
23265c751e99Sedgar_igl 
232784b7b8e7Sbellard     if (prot & PAGE_EXEC) {
23280f459d16Spbrook         te->addr_code = code_address;
232984b7b8e7Sbellard     } else {
233084b7b8e7Sbellard         te->addr_code = -1;
23319fa3e853Sbellard     }
233267b915a5Sbellard     if (prot & PAGE_WRITE) {
2333856074ecSbellard         if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_ROM ||
2334856074ecSbellard             (pd & IO_MEM_ROMD)) {
23350f459d16Spbrook             /* Write access calls the I/O callback.  */
23360f459d16Spbrook             te->addr_write = address | TLB_MMIO;
23373a7d929eSbellard         } else if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_RAM &&
23381ccde1cbSbellard                    !cpu_physical_memory_is_dirty(pd)) {
23390f459d16Spbrook             te->addr_write = address | TLB_NOTDIRTY;
23409fa3e853Sbellard         } else {
234184b7b8e7Sbellard             te->addr_write = address;
23429fa3e853Sbellard         }
23439fa3e853Sbellard     } else {
234484b7b8e7Sbellard         te->addr_write = -1;
23459fa3e853Sbellard     }
23469fa3e853Sbellard }
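
/* Illustrative sketch, not part of the original source: how a fast-path
   read would consult the entry tlb_set_page() just filled.  The helper
   name is invented; real accesses go through the softmmu templates and
   fall back to tlb_fill() on a miss. */
static inline void *example_tlb_lookup_read(CPUState *env,
                                            target_ulong vaddr,
                                            int mmu_idx)
{
    int index = (vaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
    CPUTLBEntry *te = &env->tlb_table[mmu_idx][index];

    /* A hit needs an exact tag match; TLB_MMIO etc. in the low bits
       force a miss on purpose. */
    if (te->addr_read == (vaddr & TARGET_PAGE_MASK)) {
        return (void *)(unsigned long)(vaddr + te->addend);
    }
    return NULL;
}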
23479fa3e853Sbellard 
23480124311eSbellard #else
23490124311eSbellard 
2350ee8b7021Sbellard void tlb_flush(CPUState *env, int flush_global)
23510124311eSbellard {
23520124311eSbellard }
23530124311eSbellard 
23542e12669aSbellard void tlb_flush_page(CPUState *env, target_ulong addr)
23550124311eSbellard {
23560124311eSbellard }
23570124311eSbellard 
2358edf8e2afSMika Westerberg /*
2359edf8e2afSMika Westerberg  * Walks guest process memory "regions" one by one
2360edf8e2afSMika Westerberg  * and calls callback function 'fn' for each region.
2361edf8e2afSMika Westerberg  * and calls the callback function 'fn' for each region.
23625cd2c5b6SRichard Henderson 
23635cd2c5b6SRichard Henderson struct walk_memory_regions_data
236433417e70Sbellard {
23655cd2c5b6SRichard Henderson     walk_memory_regions_fn fn;
23665cd2c5b6SRichard Henderson     void *priv;
23675cd2c5b6SRichard Henderson     unsigned long start;
23685cd2c5b6SRichard Henderson     int prot;
23695cd2c5b6SRichard Henderson };
23709fa3e853Sbellard 
23715cd2c5b6SRichard Henderson static int walk_memory_regions_end(struct walk_memory_regions_data *data,
2372b480d9b7SPaul Brook                                    abi_ulong end, int new_prot)
23735cd2c5b6SRichard Henderson {
23745cd2c5b6SRichard Henderson     if (data->start != -1ul) {
23755cd2c5b6SRichard Henderson         int rc = data->fn(data->priv, data->start, end, data->prot);
23765cd2c5b6SRichard Henderson         if (rc != 0) {
23775cd2c5b6SRichard Henderson             return rc;
23785cd2c5b6SRichard Henderson         }
23795cd2c5b6SRichard Henderson     }
2380edf8e2afSMika Westerberg 
23815cd2c5b6SRichard Henderson     data->start = (new_prot ? end : -1ul);
23825cd2c5b6SRichard Henderson     data->prot = new_prot;
23835cd2c5b6SRichard Henderson 
23845cd2c5b6SRichard Henderson     return 0;
238533417e70Sbellard }
23865cd2c5b6SRichard Henderson 
23875cd2c5b6SRichard Henderson static int walk_memory_regions_1(struct walk_memory_regions_data *data,
2388b480d9b7SPaul Brook                                  abi_ulong base, int level, void **lp)
23895cd2c5b6SRichard Henderson {
2390b480d9b7SPaul Brook     abi_ulong pa;
23915cd2c5b6SRichard Henderson     int i, rc;
23925cd2c5b6SRichard Henderson 
23935cd2c5b6SRichard Henderson     if (*lp == NULL) {
23945cd2c5b6SRichard Henderson         return walk_memory_regions_end(data, base, 0);
23959fa3e853Sbellard     }
23965cd2c5b6SRichard Henderson 
23975cd2c5b6SRichard Henderson     if (level == 0) {
23985cd2c5b6SRichard Henderson         PageDesc *pd = *lp;
23997296abacSPaul Brook         for (i = 0; i < L2_SIZE; ++i) {
24005cd2c5b6SRichard Henderson             int prot = pd[i].flags;
24015cd2c5b6SRichard Henderson 
24025cd2c5b6SRichard Henderson             pa = base | (i << TARGET_PAGE_BITS);
24035cd2c5b6SRichard Henderson             if (prot != data->prot) {
24045cd2c5b6SRichard Henderson                 rc = walk_memory_regions_end(data, pa, prot);
24055cd2c5b6SRichard Henderson                 if (rc != 0) {
24065cd2c5b6SRichard Henderson                     return rc;
24079fa3e853Sbellard                 }
24089fa3e853Sbellard             }
24095cd2c5b6SRichard Henderson         }
24105cd2c5b6SRichard Henderson     } else {
24115cd2c5b6SRichard Henderson         void **pp = *lp;
24127296abacSPaul Brook         for (i = 0; i < L2_SIZE; ++i) {
2413b480d9b7SPaul Brook             pa = base | ((abi_ulong)i <<
2414b480d9b7SPaul Brook                 (TARGET_PAGE_BITS + L2_BITS * level));
24155cd2c5b6SRichard Henderson             rc = walk_memory_regions_1(data, pa, level - 1, pp + i);
24165cd2c5b6SRichard Henderson             if (rc != 0) {
24175cd2c5b6SRichard Henderson                 return rc;
24185cd2c5b6SRichard Henderson             }
24195cd2c5b6SRichard Henderson         }
24205cd2c5b6SRichard Henderson     }
24215cd2c5b6SRichard Henderson 
24225cd2c5b6SRichard Henderson     return 0;
24235cd2c5b6SRichard Henderson }
24245cd2c5b6SRichard Henderson 
24255cd2c5b6SRichard Henderson int walk_memory_regions(void *priv, walk_memory_regions_fn fn)
24265cd2c5b6SRichard Henderson {
24275cd2c5b6SRichard Henderson     struct walk_memory_regions_data data;
24285cd2c5b6SRichard Henderson     unsigned long i;
24295cd2c5b6SRichard Henderson 
24305cd2c5b6SRichard Henderson     data.fn = fn;
24315cd2c5b6SRichard Henderson     data.priv = priv;
24325cd2c5b6SRichard Henderson     data.start = -1ul;
24335cd2c5b6SRichard Henderson     data.prot = 0;
24345cd2c5b6SRichard Henderson 
24355cd2c5b6SRichard Henderson     for (i = 0; i < V_L1_SIZE; i++) {
2436b480d9b7SPaul Brook         int rc = walk_memory_regions_1(&data, (abi_ulong)i << V_L1_SHIFT,
24375cd2c5b6SRichard Henderson                                        V_L1_SHIFT / L2_BITS - 1, l1_map + i);
24385cd2c5b6SRichard Henderson         if (rc != 0) {
24395cd2c5b6SRichard Henderson             return rc;
24405cd2c5b6SRichard Henderson         }
24415cd2c5b6SRichard Henderson     }
24425cd2c5b6SRichard Henderson 
24435cd2c5b6SRichard Henderson     return walk_memory_regions_end(&data, 0, 0);
2444edf8e2afSMika Westerberg }
2445edf8e2afSMika Westerberg 
2446b480d9b7SPaul Brook static int dump_region(void *priv, abi_ulong start,
2447b480d9b7SPaul Brook     abi_ulong end, unsigned long prot)
2448edf8e2afSMika Westerberg {
2449edf8e2afSMika Westerberg     FILE *f = (FILE *)priv;
2450edf8e2afSMika Westerberg 
2451b480d9b7SPaul Brook     (void) fprintf(f, TARGET_ABI_FMT_lx"-"TARGET_ABI_FMT_lx
2452b480d9b7SPaul Brook         " "TARGET_ABI_FMT_lx" %c%c%c\n",
2453edf8e2afSMika Westerberg         start, end, end - start,
2454edf8e2afSMika Westerberg         ((prot & PAGE_READ) ? 'r' : '-'),
2455edf8e2afSMika Westerberg         ((prot & PAGE_WRITE) ? 'w' : '-'),
2456edf8e2afSMika Westerberg         ((prot & PAGE_EXEC) ? 'x' : '-'));
2457edf8e2afSMika Westerberg 
2458edf8e2afSMika Westerberg     return (0);
2459edf8e2afSMika Westerberg }
2460edf8e2afSMika Westerberg 
2461edf8e2afSMika Westerberg /* dump memory mappings */
2462edf8e2afSMika Westerberg void page_dump(FILE *f)
2463edf8e2afSMika Westerberg {
2464edf8e2afSMika Westerberg     (void) fprintf(f, "%-8s %-8s %-8s %s\n",
2465edf8e2afSMika Westerberg             "start", "end", "size", "prot");
2466edf8e2afSMika Westerberg     walk_memory_regions(f, dump_region);
24679fa3e853Sbellard }
24689fa3e853Sbellard 
246953a5960aSpbrook int page_get_flags(target_ulong address)
24709fa3e853Sbellard {
24719fa3e853Sbellard     PageDesc *p;
24729fa3e853Sbellard 
24739fa3e853Sbellard     p = page_find(address >> TARGET_PAGE_BITS);
24749fa3e853Sbellard     if (!p)
24759fa3e853Sbellard         return 0;
24769fa3e853Sbellard     return p->flags;
24779fa3e853Sbellard }
24789fa3e853Sbellard 
2479376a7909SRichard Henderson /* Modify the flags of a page and invalidate the code if necessary.
2480376a7909SRichard Henderson    The flag PAGE_WRITE_ORG is positioned automatically depending
2481376a7909SRichard Henderson    on PAGE_WRITE.  The mmap_lock should already be held.  */
248253a5960aSpbrook void page_set_flags(target_ulong start, target_ulong end, int flags)
24839fa3e853Sbellard {
2484376a7909SRichard Henderson     target_ulong addr, len;
24859fa3e853Sbellard 
2486376a7909SRichard Henderson     /* This function should never be called with addresses outside the
2487376a7909SRichard Henderson        guest address space.  If this assert fires, it probably indicates
2488376a7909SRichard Henderson        a missing call to h2g_valid.  */
2489b480d9b7SPaul Brook #if TARGET_ABI_BITS > L1_MAP_ADDR_SPACE_BITS
2490b480d9b7SPaul Brook     assert(end < ((abi_ulong)1 << L1_MAP_ADDR_SPACE_BITS));
2491376a7909SRichard Henderson #endif
2492376a7909SRichard Henderson     assert(start < end);
2493376a7909SRichard Henderson 
24949fa3e853Sbellard     start = start & TARGET_PAGE_MASK;
24959fa3e853Sbellard     end = TARGET_PAGE_ALIGN(end);
2496376a7909SRichard Henderson 
2497376a7909SRichard Henderson     if (flags & PAGE_WRITE) {
24989fa3e853Sbellard         flags |= PAGE_WRITE_ORG;
2499376a7909SRichard Henderson     }
2500376a7909SRichard Henderson 
2501376a7909SRichard Henderson     for (addr = start, len = end - start;
2502376a7909SRichard Henderson          len != 0;
2503376a7909SRichard Henderson          len -= TARGET_PAGE_SIZE, addr += TARGET_PAGE_SIZE) {
2504376a7909SRichard Henderson         PageDesc *p = page_find_alloc(addr >> TARGET_PAGE_BITS, 1);
2505376a7909SRichard Henderson 
2506376a7909SRichard Henderson         /* If the write protection bit is set, then we invalidate
2507376a7909SRichard Henderson            the code inside.  */
25089fa3e853Sbellard         if (!(p->flags & PAGE_WRITE) &&
25099fa3e853Sbellard             (flags & PAGE_WRITE) &&
25109fa3e853Sbellard             p->first_tb) {
2511d720b93dSbellard             tb_invalidate_phys_page(addr, 0, NULL);
25129fa3e853Sbellard         }
25139fa3e853Sbellard         p->flags = flags;
25149fa3e853Sbellard     }
25159fa3e853Sbellard }
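
/* Illustrative sketch, not part of the original source: how user-mode
   mmap emulation would record a fresh guest mapping through
   page_set_flags() (the protection set is invented; mmap_lock must be
   held, as noted above). */
static inline void example_record_guest_mapping(target_ulong start,
                                                target_ulong len)
{
    page_set_flags(start, start + len,
                   PAGE_VALID | PAGE_READ | PAGE_WRITE);
}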
25169fa3e853Sbellard 
25173d97b40bSths int page_check_range(target_ulong start, target_ulong len, int flags)
25183d97b40bSths {
25193d97b40bSths     PageDesc *p;
25203d97b40bSths     target_ulong end;
25213d97b40bSths     target_ulong addr;
25223d97b40bSths 
2523376a7909SRichard Henderson     /* This function should never be called with addresses outside the
2524376a7909SRichard Henderson        guest address space.  If this assert fires, it probably indicates
2525376a7909SRichard Henderson        a missing call to h2g_valid.  */
2526338e9e6cSBlue Swirl #if TARGET_ABI_BITS > L1_MAP_ADDR_SPACE_BITS
2527338e9e6cSBlue Swirl     assert(start < ((abi_ulong)1 << L1_MAP_ADDR_SPACE_BITS));
2528376a7909SRichard Henderson #endif
2529376a7909SRichard Henderson 
25303e0650a9SRichard Henderson     if (len == 0) {
25313e0650a9SRichard Henderson         return 0;
25323e0650a9SRichard Henderson     }
2533376a7909SRichard Henderson     if (start + len - 1 < start) {
2534376a7909SRichard Henderson         /* We've wrapped around.  */
253555f280c9Sbalrog         return -1;
2536376a7909SRichard Henderson     }
253755f280c9Sbalrog 
25383d97b40bSths     end = TARGET_PAGE_ALIGN(start+len); /* must do before we lose bits in the next step */
25393d97b40bSths     start = start & TARGET_PAGE_MASK;
25403d97b40bSths 
2541376a7909SRichard Henderson     for (addr = start, len = end - start;
2542376a7909SRichard Henderson          len != 0;
2543376a7909SRichard Henderson          len -= TARGET_PAGE_SIZE, addr += TARGET_PAGE_SIZE) {
25443d97b40bSths         p = page_find(addr >> TARGET_PAGE_BITS);
25453d97b40bSths         if (!p)
25463d97b40bSths             return -1;
25473d97b40bSths         if (!(p->flags & PAGE_VALID))
25483d97b40bSths             return -1;
25493d97b40bSths 
2550dae3270cSbellard         if ((flags & PAGE_READ) && !(p->flags & PAGE_READ))
25513d97b40bSths             return -1;
2552dae3270cSbellard         if (flags & PAGE_WRITE) {
2553dae3270cSbellard             if (!(p->flags & PAGE_WRITE_ORG))
25543d97b40bSths                 return -1;
2555dae3270cSbellard             /* unprotect the page if it was made read-only because it
2556dae3270cSbellard                contains translated code */
2557dae3270cSbellard             if (!(p->flags & PAGE_WRITE)) {
2558dae3270cSbellard                 if (!page_unprotect(addr, 0, NULL))
2559dae3270cSbellard                     return -1;
2560dae3270cSbellard             }
2561dae3270cSbellard             return 0;
2562dae3270cSbellard         }
25633d97b40bSths     }
25643d97b40bSths     return 0;
25653d97b40bSths }
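
/* Illustrative sketch, not part of the original source: a syscall
   helper validating a guest buffer before writing into it (names
   invented).  A return of 0 from page_check_range() means every page
   in the range is valid and writable, with read-only code pages
   unprotected along the way. */
static inline int example_guest_buffer_writable(target_ulong guest_addr,
                                                target_ulong len)
{
    return page_check_range(guest_addr, len, PAGE_WRITE) == 0;
}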
25663d97b40bSths 
25679fa3e853Sbellard /* called from signal handler: invalidate the code and unprotect the
2568ccbb4d44SStuart Brady    page. Return TRUE if the fault was successfully handled. */
256953a5960aSpbrook int page_unprotect(target_ulong address, unsigned long pc, void *puc)
25709fa3e853Sbellard {
257145d679d6SAurelien Jarno     unsigned int prot;
257245d679d6SAurelien Jarno     PageDesc *p;
257353a5960aSpbrook     target_ulong host_start, host_end, addr;
25749fa3e853Sbellard 
2575c8a706feSpbrook     /* Technically this isn't safe inside a signal handler.  However, we
2576c8a706feSpbrook        know this only ever happens in a synchronous SEGV handler, so in
2577c8a706feSpbrook        practice it seems to be ok.  */
2578c8a706feSpbrook     mmap_lock();
2579c8a706feSpbrook 
258045d679d6SAurelien Jarno     p = page_find(address >> TARGET_PAGE_BITS);
258145d679d6SAurelien Jarno     if (!p) {
2582c8a706feSpbrook         mmap_unlock();
25839fa3e853Sbellard         return 0;
2584c8a706feSpbrook     }
258545d679d6SAurelien Jarno 
25869fa3e853Sbellard     /* if the page was really writable, then we change its
25879fa3e853Sbellard        protection back to writable */
258845d679d6SAurelien Jarno     if ((p->flags & PAGE_WRITE_ORG) && !(p->flags & PAGE_WRITE)) {
258945d679d6SAurelien Jarno         host_start = address & qemu_host_page_mask;
259045d679d6SAurelien Jarno         host_end = host_start + qemu_host_page_size;
259145d679d6SAurelien Jarno 
259245d679d6SAurelien Jarno         prot = 0;
259345d679d6SAurelien Jarno         for (addr = host_start ; addr < host_end ; addr += TARGET_PAGE_SIZE) {
259445d679d6SAurelien Jarno             p = page_find(addr >> TARGET_PAGE_BITS);
259545d679d6SAurelien Jarno             p->flags |= PAGE_WRITE;
259645d679d6SAurelien Jarno             prot |= p->flags;
259745d679d6SAurelien Jarno 
25989fa3e853Sbellard             /* and since the content will be modified, we must invalidate
25999fa3e853Sbellard                the corresponding translated code. */
260045d679d6SAurelien Jarno             tb_invalidate_phys_page(addr, pc, puc);
26019fa3e853Sbellard #ifdef DEBUG_TB_CHECK
260245d679d6SAurelien Jarno             tb_invalidate_check(addr);
26039fa3e853Sbellard #endif
260445d679d6SAurelien Jarno         }
260545d679d6SAurelien Jarno         mprotect((void *)g2h(host_start), qemu_host_page_size,
260645d679d6SAurelien Jarno                  prot & PAGE_BITS);
260745d679d6SAurelien Jarno 
2608c8a706feSpbrook         mmap_unlock();
26099fa3e853Sbellard         return 1;
26109fa3e853Sbellard     }
2611c8a706feSpbrook     mmap_unlock();
26129fa3e853Sbellard     return 0;
26139fa3e853Sbellard }
26149fa3e853Sbellard 
26156a00d601Sbellard static inline void tlb_set_dirty(CPUState *env,
26166a00d601Sbellard                                  unsigned long addr, target_ulong vaddr)
26171ccde1cbSbellard {
26181ccde1cbSbellard }
26199fa3e853Sbellard #endif /* defined(CONFIG_USER_ONLY) */
262033417e70Sbellard 
2621e2eef170Spbrook #if !defined(CONFIG_USER_ONLY)
26228da3ff18Spbrook 
2623c04b2b78SPaul Brook #define SUBPAGE_IDX(addr) ((addr) & ~TARGET_PAGE_MASK)
2624c04b2b78SPaul Brook typedef struct subpage_t {
2625c04b2b78SPaul Brook     target_phys_addr_t base;
2626f6405247SRichard Henderson     ram_addr_t sub_io_index[TARGET_PAGE_SIZE];
2627f6405247SRichard Henderson     ram_addr_t region_offset[TARGET_PAGE_SIZE];
2628c04b2b78SPaul Brook } subpage_t;
2629c04b2b78SPaul Brook 
2630c227f099SAnthony Liguori static int subpage_register (subpage_t *mmio, uint32_t start, uint32_t end,
2631c227f099SAnthony Liguori                              ram_addr_t memory, ram_addr_t region_offset);
2632f6405247SRichard Henderson static subpage_t *subpage_init (target_phys_addr_t base, ram_addr_t *phys,
2633f6405247SRichard Henderson                                 ram_addr_t orig_memory,
2634f6405247SRichard Henderson                                 ram_addr_t region_offset);
2635db7b5426Sblueswir1 #define CHECK_SUBPAGE(addr, start_addr, start_addr2, end_addr, end_addr2, \
2636db7b5426Sblueswir1                       need_subpage)                                     \
2637db7b5426Sblueswir1     do {                                                                \
2638db7b5426Sblueswir1         if (addr > start_addr)                                          \
2639db7b5426Sblueswir1             start_addr2 = 0;                                            \
2640db7b5426Sblueswir1         else {                                                          \
2641db7b5426Sblueswir1             start_addr2 = start_addr & ~TARGET_PAGE_MASK;               \
2642db7b5426Sblueswir1             if (start_addr2 > 0)                                        \
2643db7b5426Sblueswir1                 need_subpage = 1;                                       \
2644db7b5426Sblueswir1         }                                                               \
2645db7b5426Sblueswir1                                                                         \
264649e9fba2Sblueswir1         if ((start_addr + orig_size) - addr >= TARGET_PAGE_SIZE)        \
2647db7b5426Sblueswir1             end_addr2 = TARGET_PAGE_SIZE - 1;                           \
2648db7b5426Sblueswir1         else {                                                          \
2649db7b5426Sblueswir1             end_addr2 = (start_addr + orig_size - 1) & ~TARGET_PAGE_MASK; \
2650db7b5426Sblueswir1             if (end_addr2 < TARGET_PAGE_SIZE - 1)                       \
2651db7b5426Sblueswir1                 need_subpage = 1;                                       \
2652db7b5426Sblueswir1         }                                                               \
2653db7b5426Sblueswir1     } while (0)
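
/* Illustrative worked example, not part of the original source: with
   4 KB target pages, registering start_addr = 0x1100 and
   orig_size = 0x800 makes CHECK_SUBPAGE report, for the page at
   addr = 0x1000, start_addr2 = 0x100, end_addr2 = 0x8ff and
   need_subpage = 1: only bytes 0x100..0x8ff of that page get the new
   handler, so a subpage is required. */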
2654db7b5426Sblueswir1 
26558f2498f9SMichael S. Tsirkin /* register physical memory.
26568f2498f9SMichael S. Tsirkin    For RAM, 'size' must be a multiple of the target page size.
26578f2498f9SMichael S. Tsirkin    If (phys_offset & ~TARGET_PAGE_MASK) != 0, then it is an
26588da3ff18Spbrook    io memory page.  The address used when calling the IO function is
26598da3ff18Spbrook    the offset from the start of the region, plus region_offset.  Both
2660ccbb4d44SStuart Brady    start_addr and region_offset are rounded down to a page boundary
26618da3ff18Spbrook    before calculating this offset.  This should not be a problem unless
26628da3ff18Spbrook    the low bits of start_addr and region_offset differ.  */
26630fd542fbSMichael S. Tsirkin void cpu_register_physical_memory_log(target_phys_addr_t start_addr,
2664c227f099SAnthony Liguori                                          ram_addr_t size,
2665c227f099SAnthony Liguori                                          ram_addr_t phys_offset,
26660fd542fbSMichael S. Tsirkin                                          ram_addr_t region_offset,
26670fd542fbSMichael S. Tsirkin                                          bool log_dirty)
266833417e70Sbellard {
2669c227f099SAnthony Liguori     target_phys_addr_t addr, end_addr;
267092e873b9Sbellard     PhysPageDesc *p;
26719d42037bSbellard     CPUState *env;
2672c227f099SAnthony Liguori     ram_addr_t orig_size = size;
2673f6405247SRichard Henderson     subpage_t *subpage;
267433417e70Sbellard 
26753b8e6a2dSEdgar E. Iglesias     assert(size);
26760fd542fbSMichael S. Tsirkin     cpu_notify_set_memory(start_addr, size, phys_offset, log_dirty);
2677f6f3fbcaSMichael S. Tsirkin 
267867c4d23cSpbrook     if (phys_offset == IO_MEM_UNASSIGNED) {
267967c4d23cSpbrook         region_offset = start_addr;
268067c4d23cSpbrook     }
26818da3ff18Spbrook     region_offset &= TARGET_PAGE_MASK;
26825fd386f6Sbellard     size = (size + TARGET_PAGE_SIZE - 1) & TARGET_PAGE_MASK;
2683c227f099SAnthony Liguori     end_addr = start_addr + (target_phys_addr_t)size;
26843b8e6a2dSEdgar E. Iglesias 
26853b8e6a2dSEdgar E. Iglesias     addr = start_addr;
26863b8e6a2dSEdgar E. Iglesias     do {
2687db7b5426Sblueswir1         p = phys_page_find(addr >> TARGET_PAGE_BITS);
2688db7b5426Sblueswir1         if (p && p->phys_offset != IO_MEM_UNASSIGNED) {
2689c227f099SAnthony Liguori             ram_addr_t orig_memory = p->phys_offset;
2690c227f099SAnthony Liguori             target_phys_addr_t start_addr2, end_addr2;
2691db7b5426Sblueswir1             int need_subpage = 0;
2692db7b5426Sblueswir1 
2693db7b5426Sblueswir1             CHECK_SUBPAGE(addr, start_addr, start_addr2, end_addr, end_addr2,
2694db7b5426Sblueswir1                           need_subpage);
2695f6405247SRichard Henderson             if (need_subpage) {
2696db7b5426Sblueswir1                 if (!(orig_memory & IO_MEM_SUBPAGE)) {
2697db7b5426Sblueswir1                     subpage = subpage_init((addr & TARGET_PAGE_MASK),
26988da3ff18Spbrook                                            &p->phys_offset, orig_memory,
26998da3ff18Spbrook                                            p->region_offset);
2700db7b5426Sblueswir1                 } else {
2701db7b5426Sblueswir1                     subpage = io_mem_opaque[(orig_memory & ~TARGET_PAGE_MASK)
2702db7b5426Sblueswir1                                             >> IO_MEM_SHIFT];
2703db7b5426Sblueswir1                 }
27048da3ff18Spbrook                 subpage_register(subpage, start_addr2, end_addr2, phys_offset,
27058da3ff18Spbrook                                  region_offset);
27068da3ff18Spbrook                 p->region_offset = 0;
2707db7b5426Sblueswir1             } else {
2708db7b5426Sblueswir1                 p->phys_offset = phys_offset;
2709db7b5426Sblueswir1                 if ((phys_offset & ~TARGET_PAGE_MASK) <= IO_MEM_ROM ||
2710db7b5426Sblueswir1                     (phys_offset & IO_MEM_ROMD))
2711db7b5426Sblueswir1                     phys_offset += TARGET_PAGE_SIZE;
2712db7b5426Sblueswir1             }
2713db7b5426Sblueswir1         } else {
2714108c49b8Sbellard             p = phys_page_find_alloc(addr >> TARGET_PAGE_BITS, 1);
27159fa3e853Sbellard             p->phys_offset = phys_offset;
27168da3ff18Spbrook             p->region_offset = region_offset;
27172a4188a3Sbellard             if ((phys_offset & ~TARGET_PAGE_MASK) <= IO_MEM_ROM ||
27188da3ff18Spbrook                 (phys_offset & IO_MEM_ROMD)) {
271933417e70Sbellard                 phys_offset += TARGET_PAGE_SIZE;
27208da3ff18Spbrook             } else {
2721c227f099SAnthony Liguori                 target_phys_addr_t start_addr2, end_addr2;
2722db7b5426Sblueswir1                 int need_subpage = 0;
2723db7b5426Sblueswir1 
2724db7b5426Sblueswir1                 CHECK_SUBPAGE(addr, start_addr, start_addr2, end_addr,
2725db7b5426Sblueswir1                               end_addr2, need_subpage);
2726db7b5426Sblueswir1 
2727f6405247SRichard Henderson                 if (need_subpage) {
2728db7b5426Sblueswir1                     subpage = subpage_init((addr & TARGET_PAGE_MASK),
27298da3ff18Spbrook                                            &p->phys_offset, IO_MEM_UNASSIGNED,
273067c4d23cSpbrook                                            addr & TARGET_PAGE_MASK);
2731db7b5426Sblueswir1                     subpage_register(subpage, start_addr2, end_addr2,
27328da3ff18Spbrook                                      phys_offset, region_offset);
27338da3ff18Spbrook                     p->region_offset = 0;
2734db7b5426Sblueswir1                 }
2735db7b5426Sblueswir1             }
2736db7b5426Sblueswir1         }
27378da3ff18Spbrook         region_offset += TARGET_PAGE_SIZE;
27383b8e6a2dSEdgar E. Iglesias         addr += TARGET_PAGE_SIZE;
27393b8e6a2dSEdgar E. Iglesias     } while (addr != end_addr);
27409d42037bSbellard 
27419d42037bSbellard     /* since each CPU stores ram addresses in its TLB cache, we must
27429d42037bSbellard        reset the modified entries */
27439d42037bSbellard     /* XXX: slow ! */
27449d42037bSbellard     for (env = first_cpu; env != NULL; env = env->next_cpu) {
27459d42037bSbellard         tlb_flush(env, 1);
27469d42037bSbellard     }
274733417e70Sbellard }
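
/* Illustrative sketch, not part of the original source: a minimal
   board-level use of the function above (addresses and sizes are
   invented).  A page-aligned phys_offset with clear low bits maps RAM;
   low bits select an I/O handler slot instead. */
static inline void example_map_board_ram(ram_addr_t ram_offset)
{
    /* Map 8 MB of already-allocated RAM at guest physical 0 with no
       region offset and no dirty logging. */
    cpu_register_physical_memory_log(0x00000000, 0x800000,
                                     ram_offset | IO_MEM_RAM,
                                     0, false);
}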
274833417e70Sbellard 
2749ba863458Sbellard /* XXX: temporary until new memory mapping API */
2750c227f099SAnthony Liguori ram_addr_t cpu_get_physical_page_desc(target_phys_addr_t addr)
2751ba863458Sbellard {
2752ba863458Sbellard     PhysPageDesc *p;
2753ba863458Sbellard 
2754ba863458Sbellard     p = phys_page_find(addr >> TARGET_PAGE_BITS);
2755ba863458Sbellard     if (!p)
2756ba863458Sbellard         return IO_MEM_UNASSIGNED;
2757ba863458Sbellard     return p->phys_offset;
2758ba863458Sbellard }
2759ba863458Sbellard 
2760c227f099SAnthony Liguori void qemu_register_coalesced_mmio(target_phys_addr_t addr, ram_addr_t size)
2761f65ed4c1Saliguori {
2762f65ed4c1Saliguori     if (kvm_enabled())
2763f65ed4c1Saliguori         kvm_coalesce_mmio_region(addr, size);
2764f65ed4c1Saliguori }
2765f65ed4c1Saliguori 
2766c227f099SAnthony Liguori void qemu_unregister_coalesced_mmio(target_phys_addr_t addr, ram_addr_t size)
2767f65ed4c1Saliguori {
2768f65ed4c1Saliguori     if (kvm_enabled())
2769f65ed4c1Saliguori         kvm_uncoalesce_mmio_region(addr, size);
2770f65ed4c1Saliguori }
2771f65ed4c1Saliguori 
277262a2744cSSheng Yang void qemu_flush_coalesced_mmio_buffer(void)
277362a2744cSSheng Yang {
277462a2744cSSheng Yang     if (kvm_enabled())
277562a2744cSSheng Yang         kvm_flush_coalesced_mmio_buffer();
277662a2744cSSheng Yang }
277762a2744cSSheng Yang 
2778c902760fSMarcelo Tosatti #if defined(__linux__) && !defined(TARGET_S390X)
2779c902760fSMarcelo Tosatti 
2780c902760fSMarcelo Tosatti #include <sys/vfs.h>
2781c902760fSMarcelo Tosatti 
2782c902760fSMarcelo Tosatti #define HUGETLBFS_MAGIC       0x958458f6
2783c902760fSMarcelo Tosatti 
2784c902760fSMarcelo Tosatti static long gethugepagesize(const char *path)
2785c902760fSMarcelo Tosatti {
2786c902760fSMarcelo Tosatti     struct statfs fs;
2787c902760fSMarcelo Tosatti     int ret;
2788c902760fSMarcelo Tosatti 
2789c902760fSMarcelo Tosatti     do {
2790c902760fSMarcelo Tosatti         ret = statfs(path, &fs);
2791c902760fSMarcelo Tosatti     } while (ret != 0 && errno == EINTR);
2792c902760fSMarcelo Tosatti 
2793c902760fSMarcelo Tosatti     if (ret != 0) {
27946adc0549SMichael Tokarev         perror(path);
2795c902760fSMarcelo Tosatti         return 0;
2796c902760fSMarcelo Tosatti     }
2797c902760fSMarcelo Tosatti 
2798c902760fSMarcelo Tosatti     if (fs.f_type != HUGETLBFS_MAGIC)
2799c902760fSMarcelo Tosatti         fprintf(stderr, "Warning: path not on HugeTLBFS: %s\n", path);
2800c902760fSMarcelo Tosatti 
2801c902760fSMarcelo Tosatti     return fs.f_bsize;
2802c902760fSMarcelo Tosatti }
2803c902760fSMarcelo Tosatti 
280404b16653SAlex Williamson static void *file_ram_alloc(RAMBlock *block,
280504b16653SAlex Williamson                             ram_addr_t memory,
280604b16653SAlex Williamson                             const char *path)
2807c902760fSMarcelo Tosatti {
2808c902760fSMarcelo Tosatti     char *filename;
2809c902760fSMarcelo Tosatti     void *area;
2810c902760fSMarcelo Tosatti     int fd;
2811c902760fSMarcelo Tosatti #ifdef MAP_POPULATE
2812c902760fSMarcelo Tosatti     int flags;
2813c902760fSMarcelo Tosatti #endif
2814c902760fSMarcelo Tosatti     unsigned long hpagesize;
2815c902760fSMarcelo Tosatti 
2816c902760fSMarcelo Tosatti     hpagesize = gethugepagesize(path);
2817c902760fSMarcelo Tosatti     if (!hpagesize) {
2818c902760fSMarcelo Tosatti         return NULL;
2819c902760fSMarcelo Tosatti     }
2820c902760fSMarcelo Tosatti 
2821c902760fSMarcelo Tosatti     if (memory < hpagesize) {
2822c902760fSMarcelo Tosatti         return NULL;
2823c902760fSMarcelo Tosatti     }
2824c902760fSMarcelo Tosatti 
2825c902760fSMarcelo Tosatti     if (kvm_enabled() && !kvm_has_sync_mmu()) {
2826c902760fSMarcelo Tosatti         fprintf(stderr, "host lacks kvm mmu notifiers, -mem-path unsupported\n");
2827c902760fSMarcelo Tosatti         return NULL;
2828c902760fSMarcelo Tosatti     }
2829c902760fSMarcelo Tosatti 
2830c902760fSMarcelo Tosatti     if (asprintf(&filename, "%s/qemu_back_mem.XXXXXX", path) == -1) {
2831c902760fSMarcelo Tosatti         return NULL;
2832c902760fSMarcelo Tosatti     }
2833c902760fSMarcelo Tosatti 
2834c902760fSMarcelo Tosatti     fd = mkstemp(filename);
2835c902760fSMarcelo Tosatti     if (fd < 0) {
28366adc0549SMichael Tokarev         perror("unable to create backing store for hugepages");
2837c902760fSMarcelo Tosatti         free(filename);
2838c902760fSMarcelo Tosatti         return NULL;
2839c902760fSMarcelo Tosatti     }
2840c902760fSMarcelo Tosatti     unlink(filename);
2841c902760fSMarcelo Tosatti     free(filename);
2842c902760fSMarcelo Tosatti 
2843c902760fSMarcelo Tosatti     memory = (memory+hpagesize-1) & ~(hpagesize-1);
2844c902760fSMarcelo Tosatti 
2845c902760fSMarcelo Tosatti     /*
2846c902760fSMarcelo Tosatti      * ftruncate is not supported by hugetlbfs in older
2847c902760fSMarcelo Tosatti      * hosts, so don't bother bailing out on errors.
2848c902760fSMarcelo Tosatti      * If anything goes wrong with it under other filesystems,
2849c902760fSMarcelo Tosatti      * mmap will fail.
2850c902760fSMarcelo Tosatti      */
2851c902760fSMarcelo Tosatti     if (ftruncate(fd, memory))
2852c902760fSMarcelo Tosatti         perror("ftruncate");
2853c902760fSMarcelo Tosatti 
2854c902760fSMarcelo Tosatti #ifdef MAP_POPULATE
2855c902760fSMarcelo Tosatti     /* NB: MAP_POPULATE won't exhaustively alloc all phys pages in the case
2856c902760fSMarcelo Tosatti      * MAP_PRIVATE is requested.  For mem_prealloc we mmap as MAP_SHARED
2857c902760fSMarcelo Tosatti      * to sidestep this quirk.
2858c902760fSMarcelo Tosatti      */
2859c902760fSMarcelo Tosatti     flags = mem_prealloc ? MAP_POPULATE | MAP_SHARED : MAP_PRIVATE;
2860c902760fSMarcelo Tosatti     area = mmap(0, memory, PROT_READ | PROT_WRITE, flags, fd, 0);
2861c902760fSMarcelo Tosatti #else
2862c902760fSMarcelo Tosatti     area = mmap(0, memory, PROT_READ | PROT_WRITE, MAP_PRIVATE, fd, 0);
2863c902760fSMarcelo Tosatti #endif
2864c902760fSMarcelo Tosatti     if (area == MAP_FAILED) {
2865c902760fSMarcelo Tosatti         perror("file_ram_alloc: can't mmap RAM pages");
2866c902760fSMarcelo Tosatti         close(fd);
2867c902760fSMarcelo Tosatti         return NULL;
2868c902760fSMarcelo Tosatti     }
286904b16653SAlex Williamson     block->fd = fd;
2870c902760fSMarcelo Tosatti     return area;
2871c902760fSMarcelo Tosatti }
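
/* Illustrative worked example, not part of the original source: with
   2 MB huge pages the rounding above turns a request of 0x250000
   bytes into (0x250000 + 0x1fffff) & ~0x1fffff = 0x400000, i.e. two
   huge pages backing the block. */
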
2872c902760fSMarcelo Tosatti #endif
2873c902760fSMarcelo Tosatti 
2874d17b5288SAlex Williamson static ram_addr_t find_ram_offset(ram_addr_t size)
2875d17b5288SAlex Williamson {
287604b16653SAlex Williamson     RAMBlock *block, *next_block;
28773e837b2cSAlex Williamson     ram_addr_t offset = RAM_ADDR_MAX, mingap = RAM_ADDR_MAX;
287804b16653SAlex Williamson 
287904b16653SAlex Williamson     if (QLIST_EMPTY(&ram_list.blocks))
288004b16653SAlex Williamson         return 0;
288104b16653SAlex Williamson 
288204b16653SAlex Williamson     QLIST_FOREACH(block, &ram_list.blocks, next) {
2883f15fbc4bSAnthony PERARD         ram_addr_t end, next = RAM_ADDR_MAX;
288404b16653SAlex Williamson 
288504b16653SAlex Williamson         end = block->offset + block->length;
288604b16653SAlex Williamson 
288704b16653SAlex Williamson         QLIST_FOREACH(next_block, &ram_list.blocks, next) {
288804b16653SAlex Williamson             if (next_block->offset >= end) {
288904b16653SAlex Williamson                 next = MIN(next, next_block->offset);
289004b16653SAlex Williamson             }
289104b16653SAlex Williamson         }
289204b16653SAlex Williamson         if (next - end >= size && next - end < mingap) {
289304b16653SAlex Williamson             offset = end;
289404b16653SAlex Williamson             mingap = next - end;
289504b16653SAlex Williamson         }
289604b16653SAlex Williamson     }
28973e837b2cSAlex Williamson 
28983e837b2cSAlex Williamson     if (offset == RAM_ADDR_MAX) {
28993e837b2cSAlex Williamson         fprintf(stderr, "Failed to find gap of requested size: %" PRIu64 "\n",
29003e837b2cSAlex Williamson                 (uint64_t)size);
29013e837b2cSAlex Williamson         abort();
29023e837b2cSAlex Williamson     }
29033e837b2cSAlex Williamson 
290404b16653SAlex Williamson     return offset;
290504b16653SAlex Williamson }
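
/* Illustrative worked example, not part of the original source: with
   blocks at [0, 0x100000) and [0x300000, 0x400000), a request for
   0x100000 bytes sees a 0x200000-byte gap after the first block and an
   unbounded gap after the second; the smallest gap that still fits
   wins, so 0x100000 is returned. */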
290604b16653SAlex Williamson 
290704b16653SAlex Williamson static ram_addr_t last_ram_offset(void)
290804b16653SAlex Williamson {
2909d17b5288SAlex Williamson     RAMBlock *block;
2910d17b5288SAlex Williamson     ram_addr_t last = 0;
2911d17b5288SAlex Williamson 
2912d17b5288SAlex Williamson     QLIST_FOREACH(block, &ram_list.blocks, next)
2913d17b5288SAlex Williamson         last = MAX(last, block->offset + block->length);
2914d17b5288SAlex Williamson 
2915d17b5288SAlex Williamson     return last;
2916d17b5288SAlex Williamson }
2917d17b5288SAlex Williamson 
291884b89d78SCam Macdonell ram_addr_t qemu_ram_alloc_from_ptr(DeviceState *dev, const char *name,
2919fce537d4SAvi Kivity                                    ram_addr_t size, void *host,
2920fce537d4SAvi Kivity                                    MemoryRegion *mr)
292184b89d78SCam Macdonell {
292284b89d78SCam Macdonell     RAMBlock *new_block, *block;
292384b89d78SCam Macdonell 
292484b89d78SCam Macdonell     size = TARGET_PAGE_ALIGN(size);
29257267c094SAnthony Liguori     new_block = g_malloc0(sizeof(*new_block));
292684b89d78SCam Macdonell 
292784b89d78SCam Macdonell     if (dev && dev->parent_bus && dev->parent_bus->info->get_dev_path) {
292884b89d78SCam Macdonell         char *id = dev->parent_bus->info->get_dev_path(dev);
292984b89d78SCam Macdonell         if (id) {
293084b89d78SCam Macdonell             snprintf(new_block->idstr, sizeof(new_block->idstr), "%s/", id);
29317267c094SAnthony Liguori             g_free(id);
293284b89d78SCam Macdonell         }
293384b89d78SCam Macdonell     }
293484b89d78SCam Macdonell     pstrcat(new_block->idstr, sizeof(new_block->idstr), name);
293584b89d78SCam Macdonell 
293684b89d78SCam Macdonell     QLIST_FOREACH(block, &ram_list.blocks, next) {
293784b89d78SCam Macdonell         if (!strcmp(block->idstr, new_block->idstr)) {
293884b89d78SCam Macdonell             fprintf(stderr, "RAMBlock \"%s\" already registered, abort!\n",
293984b89d78SCam Macdonell                     new_block->idstr);
294084b89d78SCam Macdonell             abort();
294184b89d78SCam Macdonell         }
294284b89d78SCam Macdonell     }
294384b89d78SCam Macdonell 
2944432d268cSJun Nakajima     new_block->offset = find_ram_offset(size);
29456977dfe6SYoshiaki Tamura     if (host) {
294684b89d78SCam Macdonell         new_block->host = host;
2947cd19cfa2SHuang Ying         new_block->flags |= RAM_PREALLOC_MASK;
29486977dfe6SYoshiaki Tamura     } else {
2949c902760fSMarcelo Tosatti         if (mem_path) {
2950c902760fSMarcelo Tosatti #if defined (__linux__) && !defined(TARGET_S390X)
295104b16653SAlex Williamson             new_block->host = file_ram_alloc(new_block, size, mem_path);
2952618a568dSMarcelo Tosatti             if (!new_block->host) {
2953618a568dSMarcelo Tosatti                 new_block->host = qemu_vmalloc(size);
2954e78815a5SAndreas Färber                 qemu_madvise(new_block->host, size, QEMU_MADV_MERGEABLE);
2955618a568dSMarcelo Tosatti             }
2956c902760fSMarcelo Tosatti #else
2957c902760fSMarcelo Tosatti             fprintf(stderr, "-mem-path option unsupported\n");
2958c902760fSMarcelo Tosatti             exit(1);
2959c902760fSMarcelo Tosatti #endif
2960c902760fSMarcelo Tosatti         } else {
29616b02494dSAlexander Graf #if defined(TARGET_S390X) && defined(CONFIG_KVM)
2962ff83678aSChristian Borntraeger             /* S390 KVM requires the topmost vma of the RAM to be smaller than
2963ff83678aSChristian Borntraeger                a system-defined value, which is at least 256GB. Larger systems
2964ff83678aSChristian Borntraeger                have larger values. We put the guest between the end of the data
2965ff83678aSChristian Borntraeger                segment (system break) and this value. We use 32GB as a base to
2966ff83678aSChristian Borntraeger                have enough room for the system break to grow. */
2967ff83678aSChristian Borntraeger             new_block->host = mmap((void*)0x800000000, size,
2968c902760fSMarcelo Tosatti                                    PROT_EXEC|PROT_READ|PROT_WRITE,
2969ff83678aSChristian Borntraeger                                    MAP_SHARED | MAP_ANONYMOUS | MAP_FIXED, -1, 0);
2970fb8b2735SAlexander Graf             if (new_block->host == MAP_FAILED) {
2971fb8b2735SAlexander Graf                 fprintf(stderr, "Allocating RAM failed\n");
2972fb8b2735SAlexander Graf                 abort();
2973fb8b2735SAlexander Graf             }
29746b02494dSAlexander Graf #else
2975868bb33fSJan Kiszka             if (xen_enabled()) {
2976fce537d4SAvi Kivity                 xen_ram_alloc(new_block->offset, size, mr);
2977432d268cSJun Nakajima             } else {
297894a6b54fSpbrook                 new_block->host = qemu_vmalloc(size);
2979432d268cSJun Nakajima             }
29806b02494dSAlexander Graf #endif
2981e78815a5SAndreas Färber             qemu_madvise(new_block->host, size, QEMU_MADV_MERGEABLE);
2982c902760fSMarcelo Tosatti         }
29836977dfe6SYoshiaki Tamura     }
298494a6b54fSpbrook     new_block->length = size;
298594a6b54fSpbrook 
2986f471a17eSAlex Williamson     QLIST_INSERT_HEAD(&ram_list.blocks, new_block, next);
298794a6b54fSpbrook 
29887267c094SAnthony Liguori     ram_list.phys_dirty = g_realloc(ram_list.phys_dirty,
298904b16653SAlex Williamson                                        last_ram_offset() >> TARGET_PAGE_BITS);
2990d17b5288SAlex Williamson     memset(ram_list.phys_dirty + (new_block->offset >> TARGET_PAGE_BITS),
299194a6b54fSpbrook            0xff, size >> TARGET_PAGE_BITS);
299294a6b54fSpbrook 
29936f0437e8SJan Kiszka     if (kvm_enabled())
29946f0437e8SJan Kiszka         kvm_setup_guest_memory(new_block->host, size);
29956f0437e8SJan Kiszka 
299694a6b54fSpbrook     return new_block->offset;
299794a6b54fSpbrook }
2998e9a1ab19Sbellard 
2999fce537d4SAvi Kivity ram_addr_t qemu_ram_alloc(DeviceState *dev, const char *name, ram_addr_t size,
3000fce537d4SAvi Kivity                           MemoryRegion *mr)
30016977dfe6SYoshiaki Tamura {
3002fce537d4SAvi Kivity     return qemu_ram_alloc_from_ptr(dev, name, size, NULL, mr);
30036977dfe6SYoshiaki Tamura }
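
/* Illustrative sketch, not part of the original source: a device
   allocating backing RAM through the helper above (the name string and
   size are invented; a NULL DeviceState leaves the idstr unprefixed). */
static inline ram_addr_t example_alloc_vram(MemoryRegion *mr)
{
    return qemu_ram_alloc(NULL, "example.vram", 16 * 1024 * 1024, mr);
}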
30046977dfe6SYoshiaki Tamura 
30051f2e98b6SAlex Williamson void qemu_ram_free_from_ptr(ram_addr_t addr)
30061f2e98b6SAlex Williamson {
30071f2e98b6SAlex Williamson     RAMBlock *block;
30081f2e98b6SAlex Williamson 
30091f2e98b6SAlex Williamson     QLIST_FOREACH(block, &ram_list.blocks, next) {
30101f2e98b6SAlex Williamson         if (addr == block->offset) {
30111f2e98b6SAlex Williamson             QLIST_REMOVE(block, next);
30127267c094SAnthony Liguori             g_free(block);
30131f2e98b6SAlex Williamson             return;
30141f2e98b6SAlex Williamson         }
30151f2e98b6SAlex Williamson     }
30161f2e98b6SAlex Williamson }
30171f2e98b6SAlex Williamson 
3018c227f099SAnthony Liguori void qemu_ram_free(ram_addr_t addr)
3019e9a1ab19Sbellard {
302004b16653SAlex Williamson     RAMBlock *block;
302104b16653SAlex Williamson 
302204b16653SAlex Williamson     QLIST_FOREACH(block, &ram_list.blocks, next) {
302304b16653SAlex Williamson         if (addr == block->offset) {
302404b16653SAlex Williamson             QLIST_REMOVE(block, next);
3025cd19cfa2SHuang Ying             if (block->flags & RAM_PREALLOC_MASK) {
3026cd19cfa2SHuang Ying                 ;
3027cd19cfa2SHuang Ying             } else if (mem_path) {
302804b16653SAlex Williamson #if defined (__linux__) && !defined(TARGET_S390X)
302904b16653SAlex Williamson                 if (block->fd) {
303004b16653SAlex Williamson                     munmap(block->host, block->length);
303104b16653SAlex Williamson                     close(block->fd);
303204b16653SAlex Williamson                 } else {
303304b16653SAlex Williamson                     qemu_vfree(block->host);
303404b16653SAlex Williamson                 }
3035fd28aa13SJan Kiszka #else
3036fd28aa13SJan Kiszka                 abort();
303704b16653SAlex Williamson #endif
303804b16653SAlex Williamson             } else {
303904b16653SAlex Williamson #if defined(TARGET_S390X) && defined(CONFIG_KVM)
304004b16653SAlex Williamson                 munmap(block->host, block->length);
304104b16653SAlex Williamson #else
3042868bb33fSJan Kiszka                 if (xen_enabled()) {
3043e41d7c69SJan Kiszka                     xen_invalidate_map_cache_entry(block->host);
3044432d268cSJun Nakajima                 } else {
304504b16653SAlex Williamson                     qemu_vfree(block->host);
3046432d268cSJun Nakajima                 }
304704b16653SAlex Williamson #endif
304804b16653SAlex Williamson             }
30497267c094SAnthony Liguori             g_free(block);
305004b16653SAlex Williamson             return;
305104b16653SAlex Williamson         }
305204b16653SAlex Williamson     }
305304b16653SAlex Williamson 
3054e9a1ab19Sbellard }
3055e9a1ab19Sbellard 
3056cd19cfa2SHuang Ying #ifndef _WIN32
3057cd19cfa2SHuang Ying void qemu_ram_remap(ram_addr_t addr, ram_addr_t length)
3058cd19cfa2SHuang Ying {
3059cd19cfa2SHuang Ying     RAMBlock *block;
3060cd19cfa2SHuang Ying     ram_addr_t offset;
3061cd19cfa2SHuang Ying     int flags;
3062cd19cfa2SHuang Ying     void *area, *vaddr;
3063cd19cfa2SHuang Ying 
3064cd19cfa2SHuang Ying     QLIST_FOREACH(block, &ram_list.blocks, next) {
3065cd19cfa2SHuang Ying         offset = addr - block->offset;
3066cd19cfa2SHuang Ying         if (offset < block->length) {
3067cd19cfa2SHuang Ying             vaddr = block->host + offset;
3068cd19cfa2SHuang Ying             if (block->flags & RAM_PREALLOC_MASK) {
3069cd19cfa2SHuang Ying                 ;
3070cd19cfa2SHuang Ying             } else {
3071cd19cfa2SHuang Ying                 flags = MAP_FIXED;
3072cd19cfa2SHuang Ying                 munmap(vaddr, length);
3073cd19cfa2SHuang Ying                 if (mem_path) {
3074cd19cfa2SHuang Ying #if defined(__linux__) && !defined(TARGET_S390X)
3075cd19cfa2SHuang Ying                     if (block->fd) {
3076cd19cfa2SHuang Ying #ifdef MAP_POPULATE
3077cd19cfa2SHuang Ying                         flags |= mem_prealloc ? MAP_POPULATE | MAP_SHARED :
3078cd19cfa2SHuang Ying                             MAP_PRIVATE;
3079cd19cfa2SHuang Ying #else
3080cd19cfa2SHuang Ying                         flags |= MAP_PRIVATE;
3081cd19cfa2SHuang Ying #endif
3082cd19cfa2SHuang Ying                         area = mmap(vaddr, length, PROT_READ | PROT_WRITE,
3083cd19cfa2SHuang Ying                                     flags, block->fd, offset);
3084cd19cfa2SHuang Ying                     } else {
3085cd19cfa2SHuang Ying                         flags |= MAP_PRIVATE | MAP_ANONYMOUS;
3086cd19cfa2SHuang Ying                         area = mmap(vaddr, length, PROT_READ | PROT_WRITE,
3087cd19cfa2SHuang Ying                                     flags, -1, 0);
3088cd19cfa2SHuang Ying                     }
3089fd28aa13SJan Kiszka #else
3090fd28aa13SJan Kiszka                     abort();
3091cd19cfa2SHuang Ying #endif
3092cd19cfa2SHuang Ying                 } else {
3093cd19cfa2SHuang Ying #if defined(TARGET_S390X) && defined(CONFIG_KVM)
3094cd19cfa2SHuang Ying                     flags |= MAP_SHARED | MAP_ANONYMOUS;
3095cd19cfa2SHuang Ying                     area = mmap(vaddr, length, PROT_EXEC|PROT_READ|PROT_WRITE,
3096cd19cfa2SHuang Ying                                 flags, -1, 0);
3097cd19cfa2SHuang Ying #else
3098cd19cfa2SHuang Ying                     flags |= MAP_PRIVATE | MAP_ANONYMOUS;
3099cd19cfa2SHuang Ying                     area = mmap(vaddr, length, PROT_READ | PROT_WRITE,
3100cd19cfa2SHuang Ying                                 flags, -1, 0);
3101cd19cfa2SHuang Ying #endif
3102cd19cfa2SHuang Ying                 }
3103cd19cfa2SHuang Ying                 if (area != vaddr) {
3104f15fbc4bSAnthony PERARD                     fprintf(stderr, "Could not remap addr: "
3105f15fbc4bSAnthony PERARD                             RAM_ADDR_FMT "@" RAM_ADDR_FMT "\n",
3106cd19cfa2SHuang Ying                             length, addr);
3107cd19cfa2SHuang Ying                     exit(1);
3108cd19cfa2SHuang Ying                 }
3109cd19cfa2SHuang Ying                 qemu_madvise(vaddr, length, QEMU_MADV_MERGEABLE);
3110cd19cfa2SHuang Ying             }
3111cd19cfa2SHuang Ying             return;
3112cd19cfa2SHuang Ying         }
3113cd19cfa2SHuang Ying     }
3114cd19cfa2SHuang Ying }
3115cd19cfa2SHuang Ying #endif /* !_WIN32 */
3116cd19cfa2SHuang Ying 
3117dc828ca1Spbrook /* Return a host pointer to ram allocated with qemu_ram_alloc.
31185579c7f3Spbrook    With the exception of the softmmu code in this file, this should
31195579c7f3Spbrook    only be used for local memory (e.g. video ram) that the device owns,
31205579c7f3Spbrook    and knows it isn't going to access beyond the end of the block.
31215579c7f3Spbrook 
31225579c7f3Spbrook    It should not be used for general purpose DMA.
31235579c7f3Spbrook    Use cpu_physical_memory_map/cpu_physical_memory_rw instead.
31245579c7f3Spbrook  */
3125c227f099SAnthony Liguori void *qemu_get_ram_ptr(ram_addr_t addr)
3126dc828ca1Spbrook {
312794a6b54fSpbrook     RAMBlock *block;
312894a6b54fSpbrook 
3129f471a17eSAlex Williamson     QLIST_FOREACH(block, &ram_list.blocks, next) {
3130f471a17eSAlex Williamson         if (addr - block->offset < block->length) {
31317d82af38SVincent Palatin             /* Move this entry to the start of the list.  */
31327d82af38SVincent Palatin             if (block != QLIST_FIRST(&ram_list.blocks)) {
3133f471a17eSAlex Williamson                 QLIST_REMOVE(block, next);
3134f471a17eSAlex Williamson                 QLIST_INSERT_HEAD(&ram_list.blocks, block, next);
31357d82af38SVincent Palatin             }
3136868bb33fSJan Kiszka             if (xen_enabled()) {
3137432d268cSJun Nakajima                 /* We need to check if the requested address is in RAM
3138432d268cSJun Nakajima                  * because we don't want to map the entire memory in QEMU.
3139712c2b41SStefano Stabellini                  * In that case just map until the end of the page.
3140432d268cSJun Nakajima                  */
3141432d268cSJun Nakajima                 if (block->offset == 0) {
3142e41d7c69SJan Kiszka                     return xen_map_cache(addr, 0, 0);
3143432d268cSJun Nakajima                 } else if (block->host == NULL) {
3144e41d7c69SJan Kiszka                     block->host =
3145e41d7c69SJan Kiszka                         xen_map_cache(block->offset, block->length, 1);
3146432d268cSJun Nakajima                 }
3147432d268cSJun Nakajima             }
3148f471a17eSAlex Williamson             return block->host + (addr - block->offset);
314994a6b54fSpbrook         }
3150f471a17eSAlex Williamson     }
3151f471a17eSAlex Williamson 
315294a6b54fSpbrook     fprintf(stderr, "Bad ram offset %" PRIx64 "\n", (uint64_t)addr);
315394a6b54fSpbrook     abort();
3154f471a17eSAlex Williamson 
3155f471a17eSAlex Williamson     return NULL;
3156dc828ca1Spbrook }
3157dc828ca1Spbrook 
3158b2e0a138SMichael S. Tsirkin /* Return a host pointer to ram allocated with qemu_ram_alloc.
3159b2e0a138SMichael S. Tsirkin  * Same as qemu_get_ram_ptr but avoid reordering ramblocks.
3160b2e0a138SMichael S. Tsirkin  */
3161b2e0a138SMichael S. Tsirkin void *qemu_safe_ram_ptr(ram_addr_t addr)
3162b2e0a138SMichael S. Tsirkin {
3163b2e0a138SMichael S. Tsirkin     RAMBlock *block;
3164b2e0a138SMichael S. Tsirkin 
3165b2e0a138SMichael S. Tsirkin     QLIST_FOREACH(block, &ram_list.blocks, next) {
3166b2e0a138SMichael S. Tsirkin         if (addr - block->offset < block->length) {
3167868bb33fSJan Kiszka             if (xen_enabled()) {
3168432d268cSJun Nakajima                 /* We need to check if the requested address is in RAM
3169432d268cSJun Nakajima                  * because we don't want to map the entire memory in QEMU.
3170712c2b41SStefano Stabellini                  * In that case just map until the end of the page.
3171432d268cSJun Nakajima                  */
3172432d268cSJun Nakajima                 if (block->offset == 0) {
3173e41d7c69SJan Kiszka                     return xen_map_cache(addr, 0, 0);
3174432d268cSJun Nakajima                 } else if (block->host == NULL) {
3175e41d7c69SJan Kiszka                     block->host =
3176e41d7c69SJan Kiszka                         xen_map_cache(block->offset, block->length, 1);
3177432d268cSJun Nakajima                 }
3178432d268cSJun Nakajima             }
3179b2e0a138SMichael S. Tsirkin             return block->host + (addr - block->offset);
3180b2e0a138SMichael S. Tsirkin         }
3181b2e0a138SMichael S. Tsirkin     }
3182b2e0a138SMichael S. Tsirkin 
3183b2e0a138SMichael S. Tsirkin     fprintf(stderr, "Bad ram offset %" PRIx64 "\n", (uint64_t)addr);
3184b2e0a138SMichael S. Tsirkin     abort();
3185b2e0a138SMichael S. Tsirkin 
3186b2e0a138SMichael S. Tsirkin     return NULL;
3187b2e0a138SMichael S. Tsirkin }
3188b2e0a138SMichael S. Tsirkin 
318938bee5dcSStefano Stabellini /* Return a host pointer to guest's ram. Similar to qemu_get_ram_ptr
319038bee5dcSStefano Stabellini  * but takes a size argument */
31918ab934f9SStefano Stabellini void *qemu_ram_ptr_length(ram_addr_t addr, ram_addr_t *size)
319238bee5dcSStefano Stabellini {
31938ab934f9SStefano Stabellini     if (*size == 0) {
31948ab934f9SStefano Stabellini         return NULL;
31958ab934f9SStefano Stabellini     }
3196868bb33fSJan Kiszka     if (xen_enabled()) {
3197e41d7c69SJan Kiszka         return xen_map_cache(addr, *size, 1);
3198868bb33fSJan Kiszka     } else {
319938bee5dcSStefano Stabellini         RAMBlock *block;
320038bee5dcSStefano Stabellini 
320138bee5dcSStefano Stabellini         QLIST_FOREACH(block, &ram_list.blocks, next) {
320238bee5dcSStefano Stabellini             if (addr - block->offset < block->length) {
320338bee5dcSStefano Stabellini                 if (addr - block->offset + *size > block->length)
320438bee5dcSStefano Stabellini                     *size = block->length - addr + block->offset;
320538bee5dcSStefano Stabellini                 return block->host + (addr - block->offset);
320638bee5dcSStefano Stabellini             }
320738bee5dcSStefano Stabellini         }
320838bee5dcSStefano Stabellini 
320938bee5dcSStefano Stabellini         fprintf(stderr, "Bad ram offset %" PRIx64 "\n", (uint64_t)addr);
321038bee5dcSStefano Stabellini         abort();
321138bee5dcSStefano Stabellini     }
321238bee5dcSStefano Stabellini }
321338bee5dcSStefano Stabellini 
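/* Usage sketch for qemu_ram_ptr_length(), with a hypothetical caller
 * that wants to clear up to one page of guest RAM: the function may
 * shrink *size to the end of the containing RAMBlock, so only the
 * returned size may be touched.
 */
static void example_clear_ram(ram_addr_t addr)
{
    ram_addr_t size = TARGET_PAGE_SIZE;
    void *host = qemu_ram_ptr_length(addr, &size);

    if (host) {
        memset(host, 0, size);  /* size may be smaller than requested */
    }
}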
3214050a0ddfSAnthony PERARD void qemu_put_ram_ptr(void *addr)
3215050a0ddfSAnthony PERARD {
3216050a0ddfSAnthony PERARD     trace_qemu_put_ram_ptr(addr);
3217050a0ddfSAnthony PERARD }
3218050a0ddfSAnthony PERARD 
3219e890261fSMarcelo Tosatti int qemu_ram_addr_from_host(void *ptr, ram_addr_t *ram_addr)
32205579c7f3Spbrook {
322194a6b54fSpbrook     RAMBlock *block;
322294a6b54fSpbrook     uint8_t *host = ptr;
322394a6b54fSpbrook 
3224868bb33fSJan Kiszka     if (xen_enabled()) {
3225e41d7c69SJan Kiszka         *ram_addr = xen_ram_addr_from_mapcache(ptr);
3226712c2b41SStefano Stabellini         return 0;
3227712c2b41SStefano Stabellini     }
3228712c2b41SStefano Stabellini 
3229f471a17eSAlex Williamson     QLIST_FOREACH(block, &ram_list.blocks, next) {
3230432d268cSJun Nakajima         /* This case happens when the block is not mapped. */
3231432d268cSJun Nakajima         if (block->host == NULL) {
3232432d268cSJun Nakajima             continue;
3233432d268cSJun Nakajima         }
3234f471a17eSAlex Williamson         if (host - block->host < block->length) {
3235e890261fSMarcelo Tosatti             *ram_addr = block->offset + (host - block->host);
3236e890261fSMarcelo Tosatti             return 0;
323794a6b54fSpbrook         }
3238f471a17eSAlex Williamson     }
3239432d268cSJun Nakajima 
3240e890261fSMarcelo Tosatti     return -1;
3241e890261fSMarcelo Tosatti }
3242f471a17eSAlex Williamson 
3243e890261fSMarcelo Tosatti /* Some of the softmmu routines need to translate from a host pointer
3244e890261fSMarcelo Tosatti    (typically a TLB entry) back to a ram offset.  */
3245e890261fSMarcelo Tosatti ram_addr_t qemu_ram_addr_from_host_nofail(void *ptr)
3246e890261fSMarcelo Tosatti {
3247e890261fSMarcelo Tosatti     ram_addr_t ram_addr;
3248e890261fSMarcelo Tosatti 
3249e890261fSMarcelo Tosatti     if (qemu_ram_addr_from_host(ptr, &ram_addr)) {
325094a6b54fSpbrook         fprintf(stderr, "Bad ram pointer %p\n", ptr);
325194a6b54fSpbrook         abort();
3252e890261fSMarcelo Tosatti     }
3253e890261fSMarcelo Tosatti     return ram_addr;
32545579c7f3Spbrook }
32555579c7f3Spbrook 
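/* Round-trip sketch: a ram_addr_t translated to a host pointer with
 * qemu_get_ram_ptr() can be translated back with
 * qemu_ram_addr_from_host(); the _nofail variant above simply aborts
 * where this version would return -1.
 */
static void example_round_trip(ram_addr_t addr)
{
    void *host = qemu_get_ram_ptr(addr);
    ram_addr_t back;

    if (qemu_ram_addr_from_host(host, &back) != 0 || back != addr) {
        abort();  /* the translation must round-trip exactly */
    }
}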
3256c227f099SAnthony Liguori static uint32_t unassigned_mem_readb(void *opaque, target_phys_addr_t addr)
325733417e70Sbellard {
325867d3b957Spbrook #ifdef DEBUG_UNASSIGNED
3259ab3d1727Sblueswir1     printf("Unassigned mem read " TARGET_FMT_plx "\n", addr);
326067d3b957Spbrook #endif
32615b450407SRichard Henderson #if defined(TARGET_ALPHA) || defined(TARGET_SPARC) || defined(TARGET_MICROBLAZE)
3262b14ef7c9SBlue Swirl     cpu_unassigned_access(cpu_single_env, addr, 0, 0, 0, 1);
3263e18231a3Sblueswir1 #endif
3264e18231a3Sblueswir1     return 0;
3265e18231a3Sblueswir1 }
3266e18231a3Sblueswir1 
3267c227f099SAnthony Liguori static uint32_t unassigned_mem_readw(void *opaque, target_phys_addr_t addr)
3268e18231a3Sblueswir1 {
3269e18231a3Sblueswir1 #ifdef DEBUG_UNASSIGNED
3270e18231a3Sblueswir1     printf("Unassigned mem read " TARGET_FMT_plx "\n", addr);
3271e18231a3Sblueswir1 #endif
32725b450407SRichard Henderson #if defined(TARGET_ALPHA) || defined(TARGET_SPARC) || defined(TARGET_MICROBLAZE)
3273b14ef7c9SBlue Swirl     cpu_unassigned_access(cpu_single_env, addr, 0, 0, 0, 2);
3274e18231a3Sblueswir1 #endif
3275e18231a3Sblueswir1     return 0;
3276e18231a3Sblueswir1 }
3277e18231a3Sblueswir1 
3278c227f099SAnthony Liguori static uint32_t unassigned_mem_readl(void *opaque, target_phys_addr_t addr)
3279e18231a3Sblueswir1 {
3280e18231a3Sblueswir1 #ifdef DEBUG_UNASSIGNED
3281e18231a3Sblueswir1     printf("Unassigned mem read " TARGET_FMT_plx "\n", addr);
3282e18231a3Sblueswir1 #endif
32835b450407SRichard Henderson #if defined(TARGET_ALPHA) || defined(TARGET_SPARC) || defined(TARGET_MICROBLAZE)
3284b14ef7c9SBlue Swirl     cpu_unassigned_access(cpu_single_env, addr, 0, 0, 0, 4);
3285b4f0a316Sblueswir1 #endif
328633417e70Sbellard     return 0;
328733417e70Sbellard }
328833417e70Sbellard 
3289c227f099SAnthony Liguori static void unassigned_mem_writeb(void *opaque, target_phys_addr_t addr, uint32_t val)
329033417e70Sbellard {
329167d3b957Spbrook #ifdef DEBUG_UNASSIGNED
3292ab3d1727Sblueswir1     printf("Unassigned mem write " TARGET_FMT_plx " = 0x%x\n", addr, val);
329367d3b957Spbrook #endif
32945b450407SRichard Henderson #if defined(TARGET_ALPHA) || defined(TARGET_SPARC) || defined(TARGET_MICROBLAZE)
3295b14ef7c9SBlue Swirl     cpu_unassigned_access(cpu_single_env, addr, 1, 0, 0, 1);
3296e18231a3Sblueswir1 #endif
3297e18231a3Sblueswir1 }
3298e18231a3Sblueswir1 
3299c227f099SAnthony Liguori static void unassigned_mem_writew(void *opaque, target_phys_addr_t addr, uint32_t val)
3300e18231a3Sblueswir1 {
3301e18231a3Sblueswir1 #ifdef DEBUG_UNASSIGNED
3302e18231a3Sblueswir1     printf("Unassigned mem write " TARGET_FMT_plx " = 0x%x\n", addr, val);
3303e18231a3Sblueswir1 #endif
33045b450407SRichard Henderson #if defined(TARGET_ALPHA) || defined(TARGET_SPARC) || defined(TARGET_MICROBLAZE)
3305b14ef7c9SBlue Swirl     cpu_unassigned_access(cpu_single_env, addr, 1, 0, 0, 2);
3306e18231a3Sblueswir1 #endif
3307e18231a3Sblueswir1 }
3308e18231a3Sblueswir1 
3309c227f099SAnthony Liguori static void unassigned_mem_writel(void *opaque, target_phys_addr_t addr, uint32_t val)
3310e18231a3Sblueswir1 {
3311e18231a3Sblueswir1 #ifdef DEBUG_UNASSIGNED
3312e18231a3Sblueswir1     printf("Unassigned mem write " TARGET_FMT_plx " = 0x%x\n", addr, val);
3313e18231a3Sblueswir1 #endif
33145b450407SRichard Henderson #if defined(TARGET_ALPHA) || defined(TARGET_SPARC) || defined(TARGET_MICROBLAZE)
3315b14ef7c9SBlue Swirl     cpu_unassigned_access(cpu_single_env, addr, 1, 0, 0, 4);
3316b4f0a316Sblueswir1 #endif
331733417e70Sbellard }
331833417e70Sbellard 
3319d60efc6bSBlue Swirl static CPUReadMemoryFunc * const unassigned_mem_read[3] = {
332033417e70Sbellard     unassigned_mem_readb,
3321e18231a3Sblueswir1     unassigned_mem_readw,
3322e18231a3Sblueswir1     unassigned_mem_readl,
332333417e70Sbellard };
332433417e70Sbellard 
3325d60efc6bSBlue Swirl static CPUWriteMemoryFunc * const unassigned_mem_write[3] = {
332633417e70Sbellard     unassigned_mem_writeb,
3327e18231a3Sblueswir1     unassigned_mem_writew,
3328e18231a3Sblueswir1     unassigned_mem_writel,
332933417e70Sbellard };
333033417e70Sbellard 
3331c227f099SAnthony Liguori static void notdirty_mem_writeb(void *opaque, target_phys_addr_t ram_addr,
33320f459d16Spbrook                                 uint32_t val)
33331ccde1cbSbellard {
33343a7d929eSbellard     int dirty_flags;
3335f7c11b53SYoshiaki Tamura     dirty_flags = cpu_physical_memory_get_dirty_flags(ram_addr);
33363a7d929eSbellard     if (!(dirty_flags & CODE_DIRTY_FLAG)) {
33373a7d929eSbellard #if !defined(CONFIG_USER_ONLY)
33383a7d929eSbellard         tb_invalidate_phys_page_fast(ram_addr, 1);
3339f7c11b53SYoshiaki Tamura         dirty_flags = cpu_physical_memory_get_dirty_flags(ram_addr);
33403a7d929eSbellard #endif
33413a7d929eSbellard     }
33425579c7f3Spbrook     stb_p(qemu_get_ram_ptr(ram_addr), val);
3343f23db169Sbellard     dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
3344f7c11b53SYoshiaki Tamura     cpu_physical_memory_set_dirty_flags(ram_addr, dirty_flags);
3345f23db169Sbellard     /* we remove the notdirty callback only if the code has been
3346f23db169Sbellard        flushed */
3347f23db169Sbellard     if (dirty_flags == 0xff)
33482e70f6efSpbrook         tlb_set_dirty(cpu_single_env, cpu_single_env->mem_io_vaddr);
33491ccde1cbSbellard }
33501ccde1cbSbellard 
3351c227f099SAnthony Liguori static void notdirty_mem_writew(void *opaque, target_phys_addr_t ram_addr,
33520f459d16Spbrook                                 uint32_t val)
33531ccde1cbSbellard {
33543a7d929eSbellard     int dirty_flags;
3355f7c11b53SYoshiaki Tamura     dirty_flags = cpu_physical_memory_get_dirty_flags(ram_addr);
33563a7d929eSbellard     if (!(dirty_flags & CODE_DIRTY_FLAG)) {
33573a7d929eSbellard #if !defined(CONFIG_USER_ONLY)
33583a7d929eSbellard         tb_invalidate_phys_page_fast(ram_addr, 2);
3359f7c11b53SYoshiaki Tamura         dirty_flags = cpu_physical_memory_get_dirty_flags(ram_addr);
33603a7d929eSbellard #endif
33613a7d929eSbellard     }
33625579c7f3Spbrook     stw_p(qemu_get_ram_ptr(ram_addr), val);
3363f23db169Sbellard     dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
3364f7c11b53SYoshiaki Tamura     cpu_physical_memory_set_dirty_flags(ram_addr, dirty_flags);
3365f23db169Sbellard     /* we remove the notdirty callback only if the code has been
3366f23db169Sbellard        flushed */
3367f23db169Sbellard     if (dirty_flags == 0xff)
33682e70f6efSpbrook         tlb_set_dirty(cpu_single_env, cpu_single_env->mem_io_vaddr);
33691ccde1cbSbellard }
33701ccde1cbSbellard 
3371c227f099SAnthony Liguori static void notdirty_mem_writel(void *opaque, target_phys_addr_t ram_addr,
33720f459d16Spbrook                                 uint32_t val)
33731ccde1cbSbellard {
33743a7d929eSbellard     int dirty_flags;
3375f7c11b53SYoshiaki Tamura     dirty_flags = cpu_physical_memory_get_dirty_flags(ram_addr);
33763a7d929eSbellard     if (!(dirty_flags & CODE_DIRTY_FLAG)) {
33773a7d929eSbellard #if !defined(CONFIG_USER_ONLY)
33783a7d929eSbellard         tb_invalidate_phys_page_fast(ram_addr, 4);
3379f7c11b53SYoshiaki Tamura         dirty_flags = cpu_physical_memory_get_dirty_flags(ram_addr);
33803a7d929eSbellard #endif
33813a7d929eSbellard     }
33825579c7f3Spbrook     stl_p(qemu_get_ram_ptr(ram_addr), val);
3383f23db169Sbellard     dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
3384f7c11b53SYoshiaki Tamura     cpu_physical_memory_set_dirty_flags(ram_addr, dirty_flags);
3385f23db169Sbellard     /* we remove the notdirty callback only if the code has been
3386f23db169Sbellard        flushed */
3387f23db169Sbellard     if (dirty_flags == 0xff)
33882e70f6efSpbrook         tlb_set_dirty(cpu_single_env, cpu_single_env->mem_io_vaddr);
33891ccde1cbSbellard }
33901ccde1cbSbellard 
3391d60efc6bSBlue Swirl static CPUReadMemoryFunc * const error_mem_read[3] = {
33923a7d929eSbellard     NULL, /* never used */
33933a7d929eSbellard     NULL, /* never used */
33943a7d929eSbellard     NULL, /* never used */
33953a7d929eSbellard };
33963a7d929eSbellard 
3397d60efc6bSBlue Swirl static CPUWriteMemoryFunc * const notdirty_mem_write[3] = {
33981ccde1cbSbellard     notdirty_mem_writeb,
33991ccde1cbSbellard     notdirty_mem_writew,
34001ccde1cbSbellard     notdirty_mem_writel,
34011ccde1cbSbellard };
34021ccde1cbSbellard 
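/* The three notdirty handlers share one tail; a factored-out sketch of
 * the dirty-flag arithmetic (the real handlers stay separate because
 * each slot in the CPUWriteMemoryFunc table needs its own width):
 */
static inline void notdirty_mark_dirty(ram_addr_t ram_addr, int dirty_flags)
{
    dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
    cpu_physical_memory_set_dirty_flags(ram_addr, dirty_flags);
    /* only once every flag, including CODE_DIRTY_FLAG, is set can the
       notdirty slow path be dropped for this page */
    if (dirty_flags == 0xff) {
        tlb_set_dirty(cpu_single_env, cpu_single_env->mem_io_vaddr);
    }
}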
34030f459d16Spbrook /* Generate a debug exception if a watchpoint has been hit.  */
3404b4051334Saliguori static void check_watchpoint(int offset, int len_mask, int flags)
34050f459d16Spbrook {
34060f459d16Spbrook     CPUState *env = cpu_single_env;
340706d55cc1Saliguori     target_ulong pc, cs_base;
340806d55cc1Saliguori     TranslationBlock *tb;
34090f459d16Spbrook     target_ulong vaddr;
3410a1d1bb31Saliguori     CPUWatchpoint *wp;
341106d55cc1Saliguori     int cpu_flags;
34120f459d16Spbrook 
341306d55cc1Saliguori     if (env->watchpoint_hit) {
341406d55cc1Saliguori         /* We re-entered the check after replacing the TB. Now raise
341506d55cc1Saliguori          * the debug interrupt so that it will trigger after the
341606d55cc1Saliguori          * current instruction. */
341706d55cc1Saliguori         cpu_interrupt(env, CPU_INTERRUPT_DEBUG);
341806d55cc1Saliguori         return;
341906d55cc1Saliguori     }
34202e70f6efSpbrook     vaddr = (env->mem_io_vaddr & TARGET_PAGE_MASK) + offset;
342172cf2d4fSBlue Swirl     QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
3422b4051334Saliguori         if ((vaddr == (wp->vaddr & len_mask) ||
3423b4051334Saliguori              (vaddr & wp->len_mask) == wp->vaddr) && (wp->flags & flags)) {
34246e140f28Saliguori             wp->flags |= BP_WATCHPOINT_HIT;
34256e140f28Saliguori             if (!env->watchpoint_hit) {
3426a1d1bb31Saliguori                 env->watchpoint_hit = wp;
342706d55cc1Saliguori                 tb = tb_find_pc(env->mem_io_pc);
342806d55cc1Saliguori                 if (!tb) {
34296e140f28Saliguori                     cpu_abort(env, "check_watchpoint: could not find TB for "
34306e140f28Saliguori                               "pc=%p", (void *)env->mem_io_pc);
343106d55cc1Saliguori                 }
3432618ba8e6SStefan Weil                 cpu_restore_state(tb, env, env->mem_io_pc);
343306d55cc1Saliguori                 tb_phys_invalidate(tb, -1);
343406d55cc1Saliguori                 if (wp->flags & BP_STOP_BEFORE_ACCESS) {
343506d55cc1Saliguori                     env->exception_index = EXCP_DEBUG;
343606d55cc1Saliguori                 } else {
343706d55cc1Saliguori                     cpu_get_tb_cpu_state(env, &pc, &cs_base, &cpu_flags);
343806d55cc1Saliguori                     tb_gen_code(env, pc, cs_base, cpu_flags, 1);
343906d55cc1Saliguori                 }
344006d55cc1Saliguori                 cpu_resume_from_signal(env, NULL);
34410f459d16Spbrook             }
34426e140f28Saliguori         } else {
34436e140f28Saliguori             wp->flags &= ~BP_WATCHPOINT_HIT;
34446e140f28Saliguori         }
34450f459d16Spbrook     }
34460f459d16Spbrook }
34470f459d16Spbrook 
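/* A sketch of the address match used above: len_mask is ~(len - 1)
 * for a power-of-two access length, so the test accepts a hit when
 * either the access range contains the watchpoint start or the
 * watchpoint range contains the access address.
 */
static inline int watchpoint_address_matches(target_ulong vaddr,
                                             int len_mask,
                                             const CPUWatchpoint *wp)
{
    return vaddr == (wp->vaddr & len_mask) ||
           (vaddr & wp->len_mask) == wp->vaddr;
}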
34486658ffb8Spbrook /* Watchpoint access routines.  Watchpoints are inserted using TLB tricks,
34496658ffb8Spbrook    so these check for a hit then pass through to the normal out-of-line
34506658ffb8Spbrook    phys routines.  */
3451c227f099SAnthony Liguori static uint32_t watch_mem_readb(void *opaque, target_phys_addr_t addr)
34526658ffb8Spbrook {
3453b4051334Saliguori     check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x0, BP_MEM_READ);
34546658ffb8Spbrook     return ldub_phys(addr);
34556658ffb8Spbrook }
34566658ffb8Spbrook 
3457c227f099SAnthony Liguori static uint32_t watch_mem_readw(void *opaque, target_phys_addr_t addr)
34586658ffb8Spbrook {
3459b4051334Saliguori     check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x1, BP_MEM_READ);
34606658ffb8Spbrook     return lduw_phys(addr);
34616658ffb8Spbrook }
34626658ffb8Spbrook 
3463c227f099SAnthony Liguori static uint32_t watch_mem_readl(void *opaque, target_phys_addr_t addr)
34646658ffb8Spbrook {
3465b4051334Saliguori     check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x3, BP_MEM_READ);
34666658ffb8Spbrook     return ldl_phys(addr);
34676658ffb8Spbrook }
34686658ffb8Spbrook 
3469c227f099SAnthony Liguori static void watch_mem_writeb(void *opaque, target_phys_addr_t addr,
34706658ffb8Spbrook                              uint32_t val)
34716658ffb8Spbrook {
3472b4051334Saliguori     check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x0, BP_MEM_WRITE);
34736658ffb8Spbrook     stb_phys(addr, val);
34746658ffb8Spbrook }
34756658ffb8Spbrook 
3476c227f099SAnthony Liguori static void watch_mem_writew(void *opaque, target_phys_addr_t addr,
34776658ffb8Spbrook                              uint32_t val)
34786658ffb8Spbrook {
3479b4051334Saliguori     check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x1, BP_MEM_WRITE);
34806658ffb8Spbrook     stw_phys(addr, val);
34816658ffb8Spbrook }
34826658ffb8Spbrook 
3483c227f099SAnthony Liguori static void watch_mem_writel(void *opaque, target_phys_addr_t addr,
34846658ffb8Spbrook                              uint32_t val)
34856658ffb8Spbrook {
3486b4051334Saliguori     check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x3, BP_MEM_WRITE);
34876658ffb8Spbrook     stl_phys(addr, val);
34886658ffb8Spbrook }
34896658ffb8Spbrook 
3490d60efc6bSBlue Swirl static CPUReadMemoryFunc * const watch_mem_read[3] = {
34916658ffb8Spbrook     watch_mem_readb,
34926658ffb8Spbrook     watch_mem_readw,
34936658ffb8Spbrook     watch_mem_readl,
34946658ffb8Spbrook };
34956658ffb8Spbrook 
3496d60efc6bSBlue Swirl static CPUWriteMemoryFunc * const watch_mem_write[3] = {
34976658ffb8Spbrook     watch_mem_writeb,
34986658ffb8Spbrook     watch_mem_writew,
34996658ffb8Spbrook     watch_mem_writel,
35006658ffb8Spbrook };
35016658ffb8Spbrook 
3502f6405247SRichard Henderson static inline uint32_t subpage_readlen (subpage_t *mmio,
3503f6405247SRichard Henderson                                         target_phys_addr_t addr,
3504db7b5426Sblueswir1                                         unsigned int len)
3505db7b5426Sblueswir1 {
3506f6405247SRichard Henderson     unsigned int idx = SUBPAGE_IDX(addr);
3507db7b5426Sblueswir1 #if defined(DEBUG_SUBPAGE)
3508db7b5426Sblueswir1     printf("%s: subpage %p len %d addr " TARGET_FMT_plx " idx %d\n", __func__,
3509db7b5426Sblueswir1            mmio, len, addr, idx);
3510db7b5426Sblueswir1 #endif
3511db7b5426Sblueswir1 
3512f6405247SRichard Henderson     addr += mmio->region_offset[idx];
3513f6405247SRichard Henderson     idx = mmio->sub_io_index[idx];
3514f6405247SRichard Henderson     return io_mem_read[idx][len](io_mem_opaque[idx], addr);
3515db7b5426Sblueswir1 }
3516db7b5426Sblueswir1 
3517c227f099SAnthony Liguori static inline void subpage_writelen (subpage_t *mmio, target_phys_addr_t addr,
3518db7b5426Sblueswir1                                      uint32_t value, unsigned int len)
3519db7b5426Sblueswir1 {
3520f6405247SRichard Henderson     unsigned int idx = SUBPAGE_IDX(addr);
3521db7b5426Sblueswir1 #if defined(DEBUG_SUBPAGE)
3522f6405247SRichard Henderson     printf("%s: subpage %p len %d addr " TARGET_FMT_plx " idx %d value %08x\n",
3523f6405247SRichard Henderson            __func__, mmio, len, addr, idx, value);
3524db7b5426Sblueswir1 #endif
3525f6405247SRichard Henderson 
3526f6405247SRichard Henderson     addr += mmio->region_offset[idx];
3527f6405247SRichard Henderson     idx = mmio->sub_io_index[idx];
3528f6405247SRichard Henderson     io_mem_write[idx][len](io_mem_opaque[idx], addr, value);
3529db7b5426Sblueswir1 }
3530db7b5426Sblueswir1 
3531c227f099SAnthony Liguori static uint32_t subpage_readb (void *opaque, target_phys_addr_t addr)
3532db7b5426Sblueswir1 {
3533db7b5426Sblueswir1     return subpage_readlen(opaque, addr, 0);
3534db7b5426Sblueswir1 }
3535db7b5426Sblueswir1 
3536c227f099SAnthony Liguori static void subpage_writeb (void *opaque, target_phys_addr_t addr,
3537db7b5426Sblueswir1                             uint32_t value)
3538db7b5426Sblueswir1 {
3539db7b5426Sblueswir1     subpage_writelen(opaque, addr, value, 0);
3540db7b5426Sblueswir1 }
3541db7b5426Sblueswir1 
3542c227f099SAnthony Liguori static uint32_t subpage_readw (void *opaque, target_phys_addr_t addr)
3543db7b5426Sblueswir1 {
3544db7b5426Sblueswir1     return subpage_readlen(opaque, addr, 1);
3545db7b5426Sblueswir1 }
3546db7b5426Sblueswir1 
3547c227f099SAnthony Liguori static void subpage_writew (void *opaque, target_phys_addr_t addr,
3548db7b5426Sblueswir1                             uint32_t value)
3549db7b5426Sblueswir1 {
3550db7b5426Sblueswir1     subpage_writelen(opaque, addr, value, 1);
3551db7b5426Sblueswir1 }
3552db7b5426Sblueswir1 
3553c227f099SAnthony Liguori static uint32_t subpage_readl (void *opaque, target_phys_addr_t addr)
3554db7b5426Sblueswir1 {
3555db7b5426Sblueswir1     return subpage_readlen(opaque, addr, 2);
3556db7b5426Sblueswir1 }
3557db7b5426Sblueswir1 
3558f6405247SRichard Henderson static void subpage_writel (void *opaque, target_phys_addr_t addr,
3559f6405247SRichard Henderson                             uint32_t value)
3560db7b5426Sblueswir1 {
3561db7b5426Sblueswir1     subpage_writelen(opaque, addr, value, 2);
3562db7b5426Sblueswir1 }
3563db7b5426Sblueswir1 
3564d60efc6bSBlue Swirl static CPUReadMemoryFunc * const subpage_read[] = {
3565db7b5426Sblueswir1     &subpage_readb,
3566db7b5426Sblueswir1     &subpage_readw,
3567db7b5426Sblueswir1     &subpage_readl,
3568db7b5426Sblueswir1 };
3569db7b5426Sblueswir1 
3570d60efc6bSBlue Swirl static CPUWriteMemoryFunc * const subpage_write[] = {
3571db7b5426Sblueswir1     &subpage_writeb,
3572db7b5426Sblueswir1     &subpage_writew,
3573db7b5426Sblueswir1     &subpage_writel,
3574db7b5426Sblueswir1 };
3575db7b5426Sblueswir1 
357656384e8bSAndreas Färber static uint32_t subpage_ram_readb(void *opaque, target_phys_addr_t addr)
357756384e8bSAndreas Färber {
357856384e8bSAndreas Färber     ram_addr_t raddr = addr;
357956384e8bSAndreas Färber     void *ptr = qemu_get_ram_ptr(raddr);
358056384e8bSAndreas Färber     return ldub_p(ptr);
358156384e8bSAndreas Färber }
358256384e8bSAndreas Färber 
358356384e8bSAndreas Färber static void subpage_ram_writeb(void *opaque, target_phys_addr_t addr,
358456384e8bSAndreas Färber                                uint32_t value)
358556384e8bSAndreas Färber {
358656384e8bSAndreas Färber     ram_addr_t raddr = addr;
358756384e8bSAndreas Färber     void *ptr = qemu_get_ram_ptr(raddr);
358856384e8bSAndreas Färber     stb_p(ptr, value);
358956384e8bSAndreas Färber }
359056384e8bSAndreas Färber 
359156384e8bSAndreas Färber static uint32_t subpage_ram_readw(void *opaque, target_phys_addr_t addr)
359256384e8bSAndreas Färber {
359356384e8bSAndreas Färber     ram_addr_t raddr = addr;
359456384e8bSAndreas Färber     void *ptr = qemu_get_ram_ptr(raddr);
359556384e8bSAndreas Färber     return lduw_p(ptr);
359656384e8bSAndreas Färber }
359756384e8bSAndreas Färber 
359856384e8bSAndreas Färber static void subpage_ram_writew(void *opaque, target_phys_addr_t addr,
359956384e8bSAndreas Färber                                uint32_t value)
360056384e8bSAndreas Färber {
360156384e8bSAndreas Färber     ram_addr_t raddr = addr;
360256384e8bSAndreas Färber     void *ptr = qemu_get_ram_ptr(raddr);
360356384e8bSAndreas Färber     stw_p(ptr, value);
360456384e8bSAndreas Färber }
360556384e8bSAndreas Färber 
360656384e8bSAndreas Färber static uint32_t subpage_ram_readl(void *opaque, target_phys_addr_t addr)
360756384e8bSAndreas Färber {
360856384e8bSAndreas Färber     ram_addr_t raddr = addr;
360956384e8bSAndreas Färber     void *ptr = qemu_get_ram_ptr(raddr);
361056384e8bSAndreas Färber     return ldl_p(ptr);
361156384e8bSAndreas Färber }
361256384e8bSAndreas Färber 
361356384e8bSAndreas Färber static void subpage_ram_writel(void *opaque, target_phys_addr_t addr,
361456384e8bSAndreas Färber                                uint32_t value)
361556384e8bSAndreas Färber {
361656384e8bSAndreas Färber     ram_addr_t raddr = addr;
361756384e8bSAndreas Färber     void *ptr = qemu_get_ram_ptr(raddr);
361856384e8bSAndreas Färber     stl_p(ptr, value);
361956384e8bSAndreas Färber }
362056384e8bSAndreas Färber 
362156384e8bSAndreas Färber static CPUReadMemoryFunc * const subpage_ram_read[] = {
362256384e8bSAndreas Färber     &subpage_ram_readb,
362356384e8bSAndreas Färber     &subpage_ram_readw,
362456384e8bSAndreas Färber     &subpage_ram_readl,
362556384e8bSAndreas Färber };
362656384e8bSAndreas Färber 
362756384e8bSAndreas Färber static CPUWriteMemoryFunc * const subpage_ram_write[] = {
362856384e8bSAndreas Färber     &subpage_ram_writeb,
362956384e8bSAndreas Färber     &subpage_ram_writew,
363056384e8bSAndreas Färber     &subpage_ram_writel,
363156384e8bSAndreas Färber };
363256384e8bSAndreas Färber 
3633c227f099SAnthony Liguori static int subpage_register (subpage_t *mmio, uint32_t start, uint32_t end,
3634c227f099SAnthony Liguori                              ram_addr_t memory, ram_addr_t region_offset)
3635db7b5426Sblueswir1 {
3636db7b5426Sblueswir1     int idx, eidx;
3637db7b5426Sblueswir1 
3638db7b5426Sblueswir1     if (start >= TARGET_PAGE_SIZE || end >= TARGET_PAGE_SIZE)
3639db7b5426Sblueswir1         return -1;
3640db7b5426Sblueswir1     idx = SUBPAGE_IDX(start);
3641db7b5426Sblueswir1     eidx = SUBPAGE_IDX(end);
3642db7b5426Sblueswir1 #if defined(DEBUG_SUBPAGE)
36430bf9e31aSBlue Swirl     printf("%s: %p start %08x end %08x idx %08x eidx %08x mem %ld\n", __func__,
3644db7b5426Sblueswir1            mmio, start, end, idx, eidx, memory);
3645db7b5426Sblueswir1 #endif
364656384e8bSAndreas Färber     if ((memory & ~TARGET_PAGE_MASK) == IO_MEM_RAM) {
364756384e8bSAndreas Färber         memory = IO_MEM_SUBPAGE_RAM;
364856384e8bSAndreas Färber     }
3649f6405247SRichard Henderson     memory = (memory >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
3650db7b5426Sblueswir1     for (; idx <= eidx; idx++) {
3651f6405247SRichard Henderson         mmio->sub_io_index[idx] = memory;
3652f6405247SRichard Henderson         mmio->region_offset[idx] = region_offset;
3653db7b5426Sblueswir1     }
3654db7b5426Sblueswir1 
3655db7b5426Sblueswir1     return 0;
3656db7b5426Sblueswir1 }
3657db7b5426Sblueswir1 
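/* Usage sketch for subpage_register(): carving a hypothetical small
 * MMIO window out of a page that is otherwise RAM.  start and end are
 * byte offsets within the page; 'dev_token' would come from
 * cpu_register_io_memory().
 */
static void example_split_page(subpage_t *mmio, ram_addr_t dev_token)
{
    /* first 512 bytes of the page go to the device... */
    subpage_register(mmio, 0x000, 0x1ff, dev_token, 0);
    /* ...and the remainder stays backed by RAM */
    subpage_register(mmio, 0x200, TARGET_PAGE_SIZE - 1, IO_MEM_RAM, 0);
}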
3658f6405247SRichard Henderson static subpage_t *subpage_init (target_phys_addr_t base, ram_addr_t *phys,
3659f6405247SRichard Henderson                                 ram_addr_t orig_memory,
3660f6405247SRichard Henderson                                 ram_addr_t region_offset)
3661db7b5426Sblueswir1 {
3662c227f099SAnthony Liguori     subpage_t *mmio;
3663db7b5426Sblueswir1     int subpage_memory;
3664db7b5426Sblueswir1 
36657267c094SAnthony Liguori     mmio = g_malloc0(sizeof(subpage_t));
36661eec614bSaliguori 
3667db7b5426Sblueswir1     mmio->base = base;
36682507c12aSAlexander Graf     subpage_memory = cpu_register_io_memory(subpage_read, subpage_write, mmio,
36692507c12aSAlexander Graf                                             DEVICE_NATIVE_ENDIAN);
3670db7b5426Sblueswir1 #if defined(DEBUG_SUBPAGE)
3671db7b5426Sblueswir1     printf("%s: %p base " TARGET_FMT_plx " len %08x %d\n", __func__,
3672db7b5426Sblueswir1            mmio, base, TARGET_PAGE_SIZE, subpage_memory);
3673db7b5426Sblueswir1 #endif
3674db7b5426Sblueswir1     *phys = subpage_memory | IO_MEM_SUBPAGE;
3675f6405247SRichard Henderson     subpage_register(mmio, 0, TARGET_PAGE_SIZE-1, orig_memory, region_offset);
3676db7b5426Sblueswir1 
3677db7b5426Sblueswir1     return mmio;
3678db7b5426Sblueswir1 }
3679db7b5426Sblueswir1 
368088715657Saliguori static int get_free_io_mem_idx(void)
368188715657Saliguori {
368288715657Saliguori     int i;
368388715657Saliguori 
368488715657Saliguori     for (i = 0; i < IO_MEM_NB_ENTRIES; i++)
368588715657Saliguori         if (!io_mem_used[i]) {
368688715657Saliguori             io_mem_used[i] = 1;
368788715657Saliguori             return i;
368888715657Saliguori         }
3689c6703b47SRiku Voipio     fprintf(stderr, "Ran out of io_mem_idx, max %d!\n", IO_MEM_NB_ENTRIES);
369088715657Saliguori     return -1;
369188715657Saliguori }
369288715657Saliguori 
3693dd310534SAlexander Graf /*
3694dd310534SAlexander Graf  * Usually, devices operate in little endian mode. There are devices out
3695dd310534SAlexander Graf  * there that operate in big endian too. Each device gets byte swapped
3696dd310534SAlexander Graf  * mmio if plugged onto a CPU that does the other endianness.
3697dd310534SAlexander Graf  *
3698dd310534SAlexander Graf  * CPU          Device           swap?
3699dd310534SAlexander Graf  *
3700dd310534SAlexander Graf  * little       little           no
3701dd310534SAlexander Graf  * little       big              yes
3702dd310534SAlexander Graf  * big          little           yes
3703dd310534SAlexander Graf  * big          big              no
3704dd310534SAlexander Graf  */
3705dd310534SAlexander Graf 
3706dd310534SAlexander Graf typedef struct SwapEndianContainer {
3707dd310534SAlexander Graf     CPUReadMemoryFunc *read[3];
3708dd310534SAlexander Graf     CPUWriteMemoryFunc *write[3];
3709dd310534SAlexander Graf     void *opaque;
3710dd310534SAlexander Graf } SwapEndianContainer;
3711dd310534SAlexander Graf 
3712dd310534SAlexander Graf static uint32_t swapendian_mem_readb (void *opaque, target_phys_addr_t addr)
3713dd310534SAlexander Graf {
3714dd310534SAlexander Graf     uint32_t val;
3715dd310534SAlexander Graf     SwapEndianContainer *c = opaque;
3716dd310534SAlexander Graf     val = c->read[0](c->opaque, addr);
3717dd310534SAlexander Graf     return val;
3718dd310534SAlexander Graf }
3719dd310534SAlexander Graf 
3720dd310534SAlexander Graf static uint32_t swapendian_mem_readw(void *opaque, target_phys_addr_t addr)
3721dd310534SAlexander Graf {
3722dd310534SAlexander Graf     uint32_t val;
3723dd310534SAlexander Graf     SwapEndianContainer *c = opaque;
3724dd310534SAlexander Graf     val = bswap16(c->read[1](c->opaque, addr));
3725dd310534SAlexander Graf     return val;
3726dd310534SAlexander Graf }
3727dd310534SAlexander Graf 
3728dd310534SAlexander Graf static uint32_t swapendian_mem_readl(void *opaque, target_phys_addr_t addr)
3729dd310534SAlexander Graf {
3730dd310534SAlexander Graf     uint32_t val;
3731dd310534SAlexander Graf     SwapEndianContainer *c = opaque;
3732dd310534SAlexander Graf     val = bswap32(c->read[2](c->opaque, addr));
3733dd310534SAlexander Graf     return val;
3734dd310534SAlexander Graf }
3735dd310534SAlexander Graf 
3736dd310534SAlexander Graf static CPUReadMemoryFunc * const swapendian_readfn[3]={
3737dd310534SAlexander Graf     swapendian_mem_readb,
3738dd310534SAlexander Graf     swapendian_mem_readw,
3739dd310534SAlexander Graf     swapendian_mem_readl
3740dd310534SAlexander Graf };
3741dd310534SAlexander Graf 
3742dd310534SAlexander Graf static void swapendian_mem_writeb(void *opaque, target_phys_addr_t addr,
3743dd310534SAlexander Graf                                   uint32_t val)
3744dd310534SAlexander Graf {
3745dd310534SAlexander Graf     SwapEndianContainer *c = opaque;
3746dd310534SAlexander Graf     c->write[0](c->opaque, addr, val);
3747dd310534SAlexander Graf }
3748dd310534SAlexander Graf 
3749dd310534SAlexander Graf static void swapendian_mem_writew(void *opaque, target_phys_addr_t addr,
3750dd310534SAlexander Graf                                   uint32_t val)
3751dd310534SAlexander Graf {
3752dd310534SAlexander Graf     SwapEndianContainer *c = opaque;
3753dd310534SAlexander Graf     c->write[1](c->opaque, addr, bswap16(val));
3754dd310534SAlexander Graf }
3755dd310534SAlexander Graf 
3756dd310534SAlexander Graf static void swapendian_mem_writel(void *opaque, target_phys_addr_t addr,
3757dd310534SAlexander Graf                                   uint32_t val)
3758dd310534SAlexander Graf {
3759dd310534SAlexander Graf     SwapEndianContainer *c = opaque;
3760dd310534SAlexander Graf     c->write[2](c->opaque, addr, bswap32(val));
3761dd310534SAlexander Graf }
3762dd310534SAlexander Graf 
3763dd310534SAlexander Graf static CPUWriteMemoryFunc * const swapendian_writefn[3]={
3764dd310534SAlexander Graf     swapendian_mem_writeb,
3765dd310534SAlexander Graf     swapendian_mem_writew,
3766dd310534SAlexander Graf     swapendian_mem_writel
3767dd310534SAlexander Graf };
3768dd310534SAlexander Graf 
3769dd310534SAlexander Graf static void swapendian_init(int io_index)
3770dd310534SAlexander Graf {
37717267c094SAnthony Liguori     SwapEndianContainer *c = g_malloc(sizeof(SwapEndianContainer));
3772dd310534SAlexander Graf     int i;
3773dd310534SAlexander Graf 
3774dd310534SAlexander Graf     /* Swap mmio for big endian targets */
3775dd310534SAlexander Graf     c->opaque = io_mem_opaque[io_index];
3776dd310534SAlexander Graf     for (i = 0; i < 3; i++) {
3777dd310534SAlexander Graf         c->read[i] = io_mem_read[io_index][i];
3778dd310534SAlexander Graf         c->write[i] = io_mem_write[io_index][i];
3779dd310534SAlexander Graf 
3780dd310534SAlexander Graf         io_mem_read[io_index][i] = swapendian_readfn[i];
3781dd310534SAlexander Graf         io_mem_write[io_index][i] = swapendian_writefn[i];
3782dd310534SAlexander Graf     }
3783dd310534SAlexander Graf     io_mem_opaque[io_index] = c;
3784dd310534SAlexander Graf }
3785dd310534SAlexander Graf 
3786dd310534SAlexander Graf static void swapendian_del(int io_index)
3787dd310534SAlexander Graf {
3788dd310534SAlexander Graf     if (io_mem_read[io_index][0] == swapendian_readfn[0]) {
37897267c094SAnthony Liguori         g_free(io_mem_opaque[io_index]);
3790dd310534SAlexander Graf     }
3791dd310534SAlexander Graf }
3792dd310534SAlexander Graf 
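/* The endianness table above boils down to one test: swap exactly when
 * the device endianness differs from the target endianness.  A sketch
 * of the decision applied by cpu_register_io_memory_fixed() below:
 */
static inline int mmio_needs_swap(enum device_endian endian)
{
#ifdef TARGET_WORDS_BIGENDIAN
    return endian == DEVICE_LITTLE_ENDIAN;
#else
    return endian == DEVICE_BIG_ENDIAN;
#endif
}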
379333417e70Sbellard /* mem_read and mem_write are arrays of functions used to access a
379433417e70Sbellard    byte (index 0), word (index 1) and dword (index 2).  Individual
37950b4e6e3eSPaul Brook    functions can be omitted with a NULL function pointer.
37963ee89922Sblueswir1    If io_index is non-zero, the corresponding io zone is
37974254fab8Sblueswir1    modified. If it is zero, a new io zone is allocated. The return
37984254fab8Sblueswir1    value can be used with cpu_register_physical_memory(). (-1) is
37994254fab8Sblueswir1    returned on error. */
38001eed09cbSAvi Kivity static int cpu_register_io_memory_fixed(int io_index,
3801d60efc6bSBlue Swirl                                         CPUReadMemoryFunc * const *mem_read,
3802d60efc6bSBlue Swirl                                         CPUWriteMemoryFunc * const *mem_write,
3803dd310534SAlexander Graf                                         void *opaque, enum device_endian endian)
380433417e70Sbellard {
38053cab721dSRichard Henderson     int i;
38063cab721dSRichard Henderson 
380733417e70Sbellard     if (io_index <= 0) {
380888715657Saliguori         io_index = get_free_io_mem_idx();
380988715657Saliguori         if (io_index == -1)
381088715657Saliguori             return io_index;
381133417e70Sbellard     } else {
38121eed09cbSAvi Kivity         io_index >>= IO_MEM_SHIFT;
381333417e70Sbellard         if (io_index >= IO_MEM_NB_ENTRIES)
381433417e70Sbellard             return -1;
381533417e70Sbellard     }
381633417e70Sbellard 
38173cab721dSRichard Henderson     for (i = 0; i < 3; ++i) {
38183cab721dSRichard Henderson         io_mem_read[io_index][i]
38193cab721dSRichard Henderson             = (mem_read[i] ? mem_read[i] : unassigned_mem_read[i]);
38203cab721dSRichard Henderson     }
38213cab721dSRichard Henderson     for (i = 0; i < 3; ++i) {
38223cab721dSRichard Henderson         io_mem_write[io_index][i]
38233cab721dSRichard Henderson             = (mem_write[i] ? mem_write[i] : unassigned_mem_write[i]);
38243cab721dSRichard Henderson     }
3825a4193c8aSbellard     io_mem_opaque[io_index] = opaque;
3826f6405247SRichard Henderson 
3827dd310534SAlexander Graf     switch (endian) {
3828dd310534SAlexander Graf     case DEVICE_BIG_ENDIAN:
3829dd310534SAlexander Graf #ifndef TARGET_WORDS_BIGENDIAN
3830dd310534SAlexander Graf         swapendian_init(io_index);
3831dd310534SAlexander Graf #endif
3832dd310534SAlexander Graf         break;
3833dd310534SAlexander Graf     case DEVICE_LITTLE_ENDIAN:
3834dd310534SAlexander Graf #ifdef TARGET_WORDS_BIGENDIAN
3835dd310534SAlexander Graf         swapendian_init(io_index);
3836dd310534SAlexander Graf #endif
3837dd310534SAlexander Graf         break;
3838dd310534SAlexander Graf     case DEVICE_NATIVE_ENDIAN:
3839dd310534SAlexander Graf     default:
3840dd310534SAlexander Graf         break;
3841dd310534SAlexander Graf     }
3842dd310534SAlexander Graf 
3843f6405247SRichard Henderson     return (io_index << IO_MEM_SHIFT);
384433417e70Sbellard }
384561382a50Sbellard 
3846d60efc6bSBlue Swirl int cpu_register_io_memory(CPUReadMemoryFunc * const *mem_read,
3847d60efc6bSBlue Swirl                            CPUWriteMemoryFunc * const *mem_write,
3848dd310534SAlexander Graf                            void *opaque, enum device_endian endian)
38491eed09cbSAvi Kivity {
38502507c12aSAlexander Graf     return cpu_register_io_memory_fixed(0, mem_read, mem_write, opaque, endian);
38511eed09cbSAvi Kivity }
38521eed09cbSAvi Kivity 
385388715657Saliguori void cpu_unregister_io_memory(int io_table_address)
385488715657Saliguori {
385588715657Saliguori     int i;
385688715657Saliguori     int io_index = io_table_address >> IO_MEM_SHIFT;
385788715657Saliguori 
3858dd310534SAlexander Graf     swapendian_del(io_index);
3859dd310534SAlexander Graf 
386088715657Saliguori     for (i = 0; i < 3; i++) {
386188715657Saliguori         io_mem_read[io_index][i] = unassigned_mem_read[i];
386288715657Saliguori         io_mem_write[io_index][i] = unassigned_mem_write[i];
386388715657Saliguori     }
386488715657Saliguori     io_mem_opaque[io_index] = NULL;
386588715657Saliguori     io_mem_used[io_index] = 0;
386688715657Saliguori }
386788715657Saliguori 
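/* Registration sketch for a hypothetical device: NULL entries in the
 * tables fall back to the unassigned_mem_* handlers, and the returned
 * token can later be handed back to cpu_unregister_io_memory().
 */
static int example_register_device(void *dev_state)
{
    static CPUReadMemoryFunc * const dev_read[3] = {
        NULL, NULL, NULL,   /* a real device supplies b/w/l readers */
    };
    static CPUWriteMemoryFunc * const dev_write[3] = {
        NULL, NULL, NULL,   /* ...and writers */
    };

    return cpu_register_io_memory(dev_read, dev_write, dev_state,
                                  DEVICE_LITTLE_ENDIAN);
}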
3868e9179ce1SAvi Kivity static void io_mem_init(void)
3869e9179ce1SAvi Kivity {
3870e9179ce1SAvi Kivity     int i;
3871e9179ce1SAvi Kivity 
38722507c12aSAlexander Graf     cpu_register_io_memory_fixed(IO_MEM_ROM, error_mem_read,
38732507c12aSAlexander Graf                                  unassigned_mem_write, NULL,
38742507c12aSAlexander Graf                                  DEVICE_NATIVE_ENDIAN);
38752507c12aSAlexander Graf     cpu_register_io_memory_fixed(IO_MEM_UNASSIGNED, unassigned_mem_read,
38762507c12aSAlexander Graf                                  unassigned_mem_write, NULL,
38772507c12aSAlexander Graf                                  DEVICE_NATIVE_ENDIAN);
38782507c12aSAlexander Graf     cpu_register_io_memory_fixed(IO_MEM_NOTDIRTY, error_mem_read,
38792507c12aSAlexander Graf                                  notdirty_mem_write, NULL,
38802507c12aSAlexander Graf                                  DEVICE_NATIVE_ENDIAN);
388156384e8bSAndreas Färber     cpu_register_io_memory_fixed(IO_MEM_SUBPAGE_RAM, subpage_ram_read,
388256384e8bSAndreas Färber                                  subpage_ram_write, NULL,
388356384e8bSAndreas Färber                                  DEVICE_NATIVE_ENDIAN);
3884e9179ce1SAvi Kivity     for (i = 0; i < 5; i++)
3885e9179ce1SAvi Kivity         io_mem_used[i] = 1;
3886e9179ce1SAvi Kivity 
3887e9179ce1SAvi Kivity     io_mem_watch = cpu_register_io_memory(watch_mem_read,
38882507c12aSAlexander Graf                                           watch_mem_write, NULL,
38892507c12aSAlexander Graf                                           DEVICE_NATIVE_ENDIAN);
3890e9179ce1SAvi Kivity }
3891e9179ce1SAvi Kivity 
389262152b8aSAvi Kivity static void memory_map_init(void)
389362152b8aSAvi Kivity {
38947267c094SAnthony Liguori     system_memory = g_malloc(sizeof(*system_memory));
38958417cebfSAvi Kivity     memory_region_init(system_memory, "system", INT64_MAX);
389662152b8aSAvi Kivity     set_system_memory_map(system_memory);
3897309cb471SAvi Kivity 
38987267c094SAnthony Liguori     system_io = g_malloc(sizeof(*system_io));
3899309cb471SAvi Kivity     memory_region_init(system_io, "io", 65536);
3900309cb471SAvi Kivity     set_system_io_map(system_io);
390162152b8aSAvi Kivity }
390262152b8aSAvi Kivity 
390362152b8aSAvi Kivity MemoryRegion *get_system_memory(void)
390462152b8aSAvi Kivity {
390562152b8aSAvi Kivity     return system_memory;
390662152b8aSAvi Kivity }
390762152b8aSAvi Kivity 
3908309cb471SAvi Kivity MemoryRegion *get_system_io(void)
3909309cb471SAvi Kivity {
3910309cb471SAvi Kivity     return system_io;
3911309cb471SAvi Kivity }
3912309cb471SAvi Kivity 
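/* Sketch: board code hangs its own regions off the two roots returned
 * above.  Assuming the memory_region_init_ram() and
 * memory_region_add_subregion() entry points from memory.h of this
 * era, RAM could be added along these lines:
 *
 *     MemoryRegion *ram = g_malloc(sizeof(*ram));
 *     memory_region_init_ram(ram, NULL, "board.ram", ram_size);
 *     memory_region_add_subregion(get_system_memory(), 0, ram);
 */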
3913e2eef170Spbrook #endif /* !defined(CONFIG_USER_ONLY) */
3914e2eef170Spbrook 
391513eb76e0Sbellard /* physical memory access (slow version, mainly for debug) */
391613eb76e0Sbellard #if defined(CONFIG_USER_ONLY)
3917a68fe89cSPaul Brook int cpu_memory_rw_debug(CPUState *env, target_ulong addr,
3918a68fe89cSPaul Brook                         uint8_t *buf, int len, int is_write)
391913eb76e0Sbellard {
392013eb76e0Sbellard     int l, flags;
392113eb76e0Sbellard     target_ulong page;
392253a5960aSpbrook     void * p;
392313eb76e0Sbellard 
392413eb76e0Sbellard     while (len > 0) {
392513eb76e0Sbellard         page = addr & TARGET_PAGE_MASK;
392613eb76e0Sbellard         l = (page + TARGET_PAGE_SIZE) - addr;
392713eb76e0Sbellard         if (l > len)
392813eb76e0Sbellard             l = len;
392913eb76e0Sbellard         flags = page_get_flags(page);
393013eb76e0Sbellard         if (!(flags & PAGE_VALID))
3931a68fe89cSPaul Brook             return -1;
393213eb76e0Sbellard         if (is_write) {
393313eb76e0Sbellard             if (!(flags & PAGE_WRITE))
3934a68fe89cSPaul Brook                 return -1;
3935579a97f7Sbellard             /* XXX: this code should not depend on lock_user */
393672fb7daaSaurel32             if (!(p = lock_user(VERIFY_WRITE, addr, l, 0)))
3937a68fe89cSPaul Brook                 return -1;
393872fb7daaSaurel32             memcpy(p, buf, l);
393972fb7daaSaurel32             unlock_user(p, addr, l);
394013eb76e0Sbellard         } else {
394113eb76e0Sbellard             if (!(flags & PAGE_READ))
3942a68fe89cSPaul Brook                 return -1;
3943579a97f7Sbellard             /* XXX: this code should not depend on lock_user */
394472fb7daaSaurel32             if (!(p = lock_user(VERIFY_READ, addr, l, 1)))
3945a68fe89cSPaul Brook                 return -1;
394672fb7daaSaurel32             memcpy(buf, p, l);
39475b257578Saurel32             unlock_user(p, addr, 0);
394813eb76e0Sbellard         }
394913eb76e0Sbellard         len -= l;
395013eb76e0Sbellard         buf += l;
395113eb76e0Sbellard         addr += l;
395213eb76e0Sbellard     }
3953a68fe89cSPaul Brook     return 0;
395413eb76e0Sbellard }
39558df1cd07Sbellard 
395613eb76e0Sbellard #else
3957c227f099SAnthony Liguori void cpu_physical_memory_rw(target_phys_addr_t addr, uint8_t *buf,
395813eb76e0Sbellard                             int len, int is_write)
395913eb76e0Sbellard {
396013eb76e0Sbellard     int l, io_index;
396113eb76e0Sbellard     uint8_t *ptr;
396213eb76e0Sbellard     uint32_t val;
3963c227f099SAnthony Liguori     target_phys_addr_t page;
39648ca5692dSAnthony PERARD     ram_addr_t pd;
396592e873b9Sbellard     PhysPageDesc *p;
396613eb76e0Sbellard 
396713eb76e0Sbellard     while (len > 0) {
396813eb76e0Sbellard         page = addr & TARGET_PAGE_MASK;
396913eb76e0Sbellard         l = (page + TARGET_PAGE_SIZE) - addr;
397013eb76e0Sbellard         if (l > len)
397113eb76e0Sbellard             l = len;
397292e873b9Sbellard         p = phys_page_find(page >> TARGET_PAGE_BITS);
397313eb76e0Sbellard         if (!p) {
397413eb76e0Sbellard             pd = IO_MEM_UNASSIGNED;
397513eb76e0Sbellard         } else {
397613eb76e0Sbellard             pd = p->phys_offset;
397713eb76e0Sbellard         }
397813eb76e0Sbellard 
397913eb76e0Sbellard         if (is_write) {
39803a7d929eSbellard             if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
3981c227f099SAnthony Liguori                 target_phys_addr_t addr1 = addr;
398213eb76e0Sbellard                 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
39838da3ff18Spbrook                 if (p)
39846c2934dbSaurel32                     addr1 = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
39856a00d601Sbellard                 /* XXX: could force cpu_single_env to NULL to avoid
39866a00d601Sbellard                    potential bugs */
39876c2934dbSaurel32                 if (l >= 4 && ((addr1 & 3) == 0)) {
39881c213d19Sbellard                     /* 32 bit write access */
3989c27004ecSbellard                     val = ldl_p(buf);
39906c2934dbSaurel32                     io_mem_write[io_index][2](io_mem_opaque[io_index], addr1, val);
399113eb76e0Sbellard                     l = 4;
39926c2934dbSaurel32                 } else if (l >= 2 && ((addr1 & 1) == 0)) {
39931c213d19Sbellard                     /* 16 bit write access */
3994c27004ecSbellard                     val = lduw_p(buf);
39956c2934dbSaurel32                     io_mem_write[io_index][1](io_mem_opaque[io_index], addr1, val);
399613eb76e0Sbellard                     l = 2;
399713eb76e0Sbellard                 } else {
39981c213d19Sbellard                     /* 8 bit write access */
3999c27004ecSbellard                     val = ldub_p(buf);
40006c2934dbSaurel32                     io_mem_write[io_index][0](io_mem_opaque[io_index], addr1, val);
400113eb76e0Sbellard                     l = 1;
400213eb76e0Sbellard                 }
400313eb76e0Sbellard             } else {
40048ca5692dSAnthony PERARD                 ram_addr_t addr1;
4005b448f2f3Sbellard                 addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
400613eb76e0Sbellard                 /* RAM case */
40075579c7f3Spbrook                 ptr = qemu_get_ram_ptr(addr1);
400813eb76e0Sbellard                 memcpy(ptr, buf, l);
40093a7d929eSbellard                 if (!cpu_physical_memory_is_dirty(addr1)) {
4010b448f2f3Sbellard                     /* invalidate code */
4011b448f2f3Sbellard                     tb_invalidate_phys_page_range(addr1, addr1 + l, 0);
4012b448f2f3Sbellard                     /* set dirty bit */
4013f7c11b53SYoshiaki Tamura                     cpu_physical_memory_set_dirty_flags(
4014f7c11b53SYoshiaki Tamura                         addr1, (0xff & ~CODE_DIRTY_FLAG));
401513eb76e0Sbellard                 }
4016050a0ddfSAnthony PERARD                 qemu_put_ram_ptr(ptr);
40173a7d929eSbellard             }
401813eb76e0Sbellard         } else {
40192a4188a3Sbellard             if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
40202a4188a3Sbellard                 !(pd & IO_MEM_ROMD)) {
4021c227f099SAnthony Liguori                 target_phys_addr_t addr1 = addr;
402213eb76e0Sbellard                 /* I/O case */
402313eb76e0Sbellard                 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
40248da3ff18Spbrook                 if (p)
40256c2934dbSaurel32                     addr1 = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
40266c2934dbSaurel32                 if (l >= 4 && ((addr1 & 3) == 0)) {
402713eb76e0Sbellard                     /* 32 bit read access */
40286c2934dbSaurel32                     val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr1);
4029c27004ecSbellard                     stl_p(buf, val);
403013eb76e0Sbellard                     l = 4;
40316c2934dbSaurel32                 } else if (l >= 2 && ((addr1 & 1) == 0)) {
403213eb76e0Sbellard                     /* 16 bit read access */
40336c2934dbSaurel32                     val = io_mem_read[io_index][1](io_mem_opaque[io_index], addr1);
4034c27004ecSbellard                     stw_p(buf, val);
403513eb76e0Sbellard                     l = 2;
403613eb76e0Sbellard                 } else {
40371c213d19Sbellard                     /* 8 bit read access */
40386c2934dbSaurel32                     val = io_mem_read[io_index][0](io_mem_opaque[io_index], addr1);
4039c27004ecSbellard                     stb_p(buf, val);
404013eb76e0Sbellard                     l = 1;
404113eb76e0Sbellard                 }
404213eb76e0Sbellard             } else {
404313eb76e0Sbellard                 /* RAM case */
4044050a0ddfSAnthony PERARD                 ptr = qemu_get_ram_ptr(pd & TARGET_PAGE_MASK);
4045050a0ddfSAnthony PERARD                 memcpy(buf, ptr + (addr & ~TARGET_PAGE_MASK), l);
4046050a0ddfSAnthony PERARD                 qemu_put_ram_ptr(ptr);
404713eb76e0Sbellard             }
404813eb76e0Sbellard         }
404913eb76e0Sbellard         len -= l;
405013eb76e0Sbellard         buf += l;
405113eb76e0Sbellard         addr += l;
405213eb76e0Sbellard     }
405313eb76e0Sbellard }
40548df1cd07Sbellard 
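/* Usage sketch: a write followed by a read-back through the slow path
 * above, with a hypothetical guest physical address 'gpa':
 */
static void example_phys_rw(target_phys_addr_t gpa)
{
    uint8_t out[16] = "hello, guest";
    uint8_t in[16];

    cpu_physical_memory_rw(gpa, out, sizeof(out), 1 /* write */);
    cpu_physical_memory_rw(gpa, in, sizeof(in), 0 /* read */);
    /* 'in' now matches 'out' if gpa resolved to RAM both times */
}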
4055d0ecd2aaSbellard /* used for ROM loading: can write to RAM and ROM */
4056c227f099SAnthony Liguori void cpu_physical_memory_write_rom(target_phys_addr_t addr,
4057d0ecd2aaSbellard                                    const uint8_t *buf, int len)
4058d0ecd2aaSbellard {
4059d0ecd2aaSbellard     int l;
4060d0ecd2aaSbellard     uint8_t *ptr;
4061c227f099SAnthony Liguori     target_phys_addr_t page;
4062d0ecd2aaSbellard     unsigned long pd;
4063d0ecd2aaSbellard     PhysPageDesc *p;
4064d0ecd2aaSbellard 
4065d0ecd2aaSbellard     while (len > 0) {
4066d0ecd2aaSbellard         page = addr & TARGET_PAGE_MASK;
4067d0ecd2aaSbellard         l = (page + TARGET_PAGE_SIZE) - addr;
4068d0ecd2aaSbellard         if (l > len)
4069d0ecd2aaSbellard             l = len;
4070d0ecd2aaSbellard         p = phys_page_find(page >> TARGET_PAGE_BITS);
4071d0ecd2aaSbellard         if (!p) {
4072d0ecd2aaSbellard             pd = IO_MEM_UNASSIGNED;
4073d0ecd2aaSbellard         } else {
4074d0ecd2aaSbellard             pd = p->phys_offset;
4075d0ecd2aaSbellard         }
4076d0ecd2aaSbellard 
4077d0ecd2aaSbellard         if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM &&
40782a4188a3Sbellard             (pd & ~TARGET_PAGE_MASK) != IO_MEM_ROM &&
40792a4188a3Sbellard             !(pd & IO_MEM_ROMD)) {
4080d0ecd2aaSbellard             /* do nothing */
4081d0ecd2aaSbellard         } else {
4082d0ecd2aaSbellard             unsigned long addr1;
4083d0ecd2aaSbellard             addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
4084d0ecd2aaSbellard             /* ROM/RAM case */
40855579c7f3Spbrook             ptr = qemu_get_ram_ptr(addr1);
4086d0ecd2aaSbellard             memcpy(ptr, buf, l);
4087050a0ddfSAnthony PERARD             qemu_put_ram_ptr(ptr);
4088d0ecd2aaSbellard         }
4089d0ecd2aaSbellard         len -= l;
4090d0ecd2aaSbellard         buf += l;
4091d0ecd2aaSbellard         addr += l;
4092d0ecd2aaSbellard     }
4093d0ecd2aaSbellard }
4094d0ecd2aaSbellard 
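/* Sketch: firmware loading goes through this path because a plain
 * cpu_physical_memory_rw() write to a ROM page is routed to the
 * unassigned_mem_write handlers (io_mem_init() below registers
 * IO_MEM_ROM with error_mem_read/unassigned_mem_write), e.g.
 * (bios_base, bios_blob and bios_size are hypothetical):
 *
 *     cpu_physical_memory_write_rom(bios_base, bios_blob, bios_size);
 */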
40956d16c2f8Saliguori typedef struct {
40966d16c2f8Saliguori     void *buffer;
4097c227f099SAnthony Liguori     target_phys_addr_t addr;
4098c227f099SAnthony Liguori     target_phys_addr_t len;
40996d16c2f8Saliguori } BounceBuffer;
41006d16c2f8Saliguori 
41016d16c2f8Saliguori static BounceBuffer bounce;
41026d16c2f8Saliguori 
4103ba223c29Saliguori typedef struct MapClient {
4104ba223c29Saliguori     void *opaque;
4105ba223c29Saliguori     void (*callback)(void *opaque);
410672cf2d4fSBlue Swirl     QLIST_ENTRY(MapClient) link;
4107ba223c29Saliguori } MapClient;
4108ba223c29Saliguori 
410972cf2d4fSBlue Swirl static QLIST_HEAD(map_client_list, MapClient) map_client_list
411072cf2d4fSBlue Swirl     = QLIST_HEAD_INITIALIZER(map_client_list);
4111ba223c29Saliguori 
4112ba223c29Saliguori void *cpu_register_map_client(void *opaque, void (*callback)(void *opaque))
4113ba223c29Saliguori {
41147267c094SAnthony Liguori     MapClient *client = g_malloc(sizeof(*client));
4115ba223c29Saliguori 
4116ba223c29Saliguori     client->opaque = opaque;
4117ba223c29Saliguori     client->callback = callback;
411872cf2d4fSBlue Swirl     QLIST_INSERT_HEAD(&map_client_list, client, link);
4119ba223c29Saliguori     return client;
4120ba223c29Saliguori }
4121ba223c29Saliguori 
4122ba223c29Saliguori void cpu_unregister_map_client(void *_client)
4123ba223c29Saliguori {
4124ba223c29Saliguori     MapClient *client = (MapClient *)_client;
4125ba223c29Saliguori 
412672cf2d4fSBlue Swirl     QLIST_REMOVE(client, link);
41277267c094SAnthony Liguori     g_free(client);
4128ba223c29Saliguori }
4129ba223c29Saliguori 
4130ba223c29Saliguori static void cpu_notify_map_clients(void)
4131ba223c29Saliguori {
4132ba223c29Saliguori     MapClient *client;
4133ba223c29Saliguori 
413472cf2d4fSBlue Swirl     while (!QLIST_EMPTY(&map_client_list)) {
413572cf2d4fSBlue Swirl         client = QLIST_FIRST(&map_client_list);
4136ba223c29Saliguori         client->callback(client->opaque);
413734d5e948SIsaku Yamahata         cpu_unregister_map_client(client);
4138ba223c29Saliguori     }
4139ba223c29Saliguori }
4140ba223c29Saliguori 
41416d16c2f8Saliguori /* Map a physical memory region into a host virtual address.
41426d16c2f8Saliguori  * May map a subset of the requested range, given by and returned in *plen.
41436d16c2f8Saliguori  * May return NULL if resources needed to perform the mapping are exhausted.
41446d16c2f8Saliguori  * Use only for reads OR writes - not for read-modify-write operations.
4145ba223c29Saliguori  * Use cpu_register_map_client() to know when retrying the map operation is
4146ba223c29Saliguori  * likely to succeed.
41476d16c2f8Saliguori  */
4148c227f099SAnthony Liguori void *cpu_physical_memory_map(target_phys_addr_t addr,
4149c227f099SAnthony Liguori                               target_phys_addr_t *plen,
41506d16c2f8Saliguori                               int is_write)
41516d16c2f8Saliguori {
4152c227f099SAnthony Liguori     target_phys_addr_t len = *plen;
415338bee5dcSStefano Stabellini     target_phys_addr_t todo = 0;
41546d16c2f8Saliguori     int l;
4155c227f099SAnthony Liguori     target_phys_addr_t page;
41566d16c2f8Saliguori     unsigned long pd;
41576d16c2f8Saliguori     PhysPageDesc *p;
4158f15fbc4bSAnthony PERARD     ram_addr_t raddr = RAM_ADDR_MAX;
41598ab934f9SStefano Stabellini     ram_addr_t rlen;
41608ab934f9SStefano Stabellini     void *ret;
41616d16c2f8Saliguori 
41626d16c2f8Saliguori     while (len > 0) {
41636d16c2f8Saliguori         page = addr & TARGET_PAGE_MASK;
41646d16c2f8Saliguori         l = (page + TARGET_PAGE_SIZE) - addr;
41656d16c2f8Saliguori         if (l > len)
41666d16c2f8Saliguori             l = len;
41676d16c2f8Saliguori         p = phys_page_find(page >> TARGET_PAGE_BITS);
41686d16c2f8Saliguori         if (!p) {
41696d16c2f8Saliguori             pd = IO_MEM_UNASSIGNED;
41706d16c2f8Saliguori         } else {
41716d16c2f8Saliguori             pd = p->phys_offset;
41726d16c2f8Saliguori         }
41736d16c2f8Saliguori 
41746d16c2f8Saliguori         if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
417538bee5dcSStefano Stabellini             if (todo || bounce.buffer) {
41766d16c2f8Saliguori                 break;
41776d16c2f8Saliguori             }
41786d16c2f8Saliguori             bounce.buffer = qemu_memalign(TARGET_PAGE_SIZE, TARGET_PAGE_SIZE);
41796d16c2f8Saliguori             bounce.addr = addr;
41806d16c2f8Saliguori             bounce.len = l;
41816d16c2f8Saliguori             if (!is_write) {
418254f7b4a3SStefan Weil                 cpu_physical_memory_read(addr, bounce.buffer, l);
41836d16c2f8Saliguori             }
418438bee5dcSStefano Stabellini 
418538bee5dcSStefano Stabellini             *plen = l;
418638bee5dcSStefano Stabellini             return bounce.buffer;
41876d16c2f8Saliguori         }
41888ab934f9SStefano Stabellini         if (!todo) {
41898ab934f9SStefano Stabellini             raddr = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
41908ab934f9SStefano Stabellini         }
41916d16c2f8Saliguori 
41926d16c2f8Saliguori         len -= l;
41936d16c2f8Saliguori         addr += l;
419438bee5dcSStefano Stabellini         todo += l;
41956d16c2f8Saliguori     }
41968ab934f9SStefano Stabellini     rlen = todo;
41978ab934f9SStefano Stabellini     ret = qemu_ram_ptr_length(raddr, &rlen);
41988ab934f9SStefano Stabellini     *plen = rlen;
41998ab934f9SStefano Stabellini     return ret;
42006d16c2f8Saliguori }
42016d16c2f8Saliguori 
42026d16c2f8Saliguori /* Unmaps a memory region previously mapped by cpu_physical_memory_map().
42036d16c2f8Saliguori  * Will also mark the memory as dirty if is_write == 1.  access_len gives
42046d16c2f8Saliguori  * the amount of memory that was actually read or written by the caller.
42056d16c2f8Saliguori  */
4206c227f099SAnthony Liguori void cpu_physical_memory_unmap(void *buffer, target_phys_addr_t len,
4207c227f099SAnthony Liguori                                int is_write, target_phys_addr_t access_len)
42086d16c2f8Saliguori {
42096d16c2f8Saliguori     if (buffer != bounce.buffer) {
42106d16c2f8Saliguori         if (is_write) {
4211e890261fSMarcelo Tosatti             ram_addr_t addr1 = qemu_ram_addr_from_host_nofail(buffer);
42126d16c2f8Saliguori             while (access_len) {
42136d16c2f8Saliguori                 unsigned l;
42146d16c2f8Saliguori                 l = TARGET_PAGE_SIZE;
42156d16c2f8Saliguori                 if (l > access_len)
42166d16c2f8Saliguori                     l = access_len;
42176d16c2f8Saliguori                 if (!cpu_physical_memory_is_dirty(addr1)) {
42186d16c2f8Saliguori                     /* invalidate code */
42196d16c2f8Saliguori                     tb_invalidate_phys_page_range(addr1, addr1 + l, 0);
42206d16c2f8Saliguori                     /* set dirty bit */
4221f7c11b53SYoshiaki Tamura                     cpu_physical_memory_set_dirty_flags(
4222f7c11b53SYoshiaki Tamura                         addr1, (0xff & ~CODE_DIRTY_FLAG));
42236d16c2f8Saliguori                 }
42246d16c2f8Saliguori                 addr1 += l;
42256d16c2f8Saliguori                 access_len -= l;
42266d16c2f8Saliguori             }
42276d16c2f8Saliguori         }
4228868bb33fSJan Kiszka         if (xen_enabled()) {
4229e41d7c69SJan Kiszka             xen_invalidate_map_cache_entry(buffer);
4230050a0ddfSAnthony PERARD         }
42316d16c2f8Saliguori         return;
42326d16c2f8Saliguori     }
42336d16c2f8Saliguori     if (is_write) {
42346d16c2f8Saliguori         cpu_physical_memory_write(bounce.addr, bounce.buffer, access_len);
42356d16c2f8Saliguori     }
4236f8a83245SHerve Poussineau     qemu_vfree(bounce.buffer);
42376d16c2f8Saliguori     bounce.buffer = NULL;
4238ba223c29Saliguori     cpu_notify_map_clients();
42396d16c2f8Saliguori }
4240d0ecd2aaSbellard 
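/* Illustrative sketch (not part of the original file; the function and
   buffer names are hypothetical): the intended calling pattern for the
   map/unmap pair above, as used by DMA-style device code. */
#if 0
static void dma_write_example(target_phys_addr_t guest_addr,
                              const uint8_t *data, target_phys_addr_t size)
{
    target_phys_addr_t plen = size;
    void *host = cpu_physical_memory_map(guest_addr, &plen, 1 /* is_write */);

    if (!host || plen < size) {
        /* Mapping failed or was truncated (e.g. the single bounce buffer
           is busy); a real caller would retry later, possibly after
           registering a callback with cpu_register_map_client(). */
        return;
    }
    memcpy(host, data, size);
    cpu_physical_memory_unmap(host, plen, 1 /* is_write */, size);
}
#endif
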
42418df1cd07Sbellard /* warning: addr must be aligned */
42421e78bcc1SAlexander Graf static inline uint32_t ldl_phys_internal(target_phys_addr_t addr,
42431e78bcc1SAlexander Graf                                          enum device_endian endian)
42448df1cd07Sbellard {
42458df1cd07Sbellard     int io_index;
42468df1cd07Sbellard     uint8_t *ptr;
42478df1cd07Sbellard     uint32_t val;
42488df1cd07Sbellard     unsigned long pd;
42498df1cd07Sbellard     PhysPageDesc *p;
42508df1cd07Sbellard 
42518df1cd07Sbellard     p = phys_page_find(addr >> TARGET_PAGE_BITS);
42528df1cd07Sbellard     if (!p) {
42538df1cd07Sbellard         pd = IO_MEM_UNASSIGNED;
42548df1cd07Sbellard     } else {
42558df1cd07Sbellard         pd = p->phys_offset;
42568df1cd07Sbellard     }
42578df1cd07Sbellard 
42582a4188a3Sbellard     if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
42592a4188a3Sbellard         !(pd & IO_MEM_ROMD)) {
42608df1cd07Sbellard         /* I/O case */
42618df1cd07Sbellard         io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
42628da3ff18Spbrook         if (p)
42638da3ff18Spbrook             addr = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
42648df1cd07Sbellard         val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr);
42651e78bcc1SAlexander Graf #if defined(TARGET_WORDS_BIGENDIAN)
42661e78bcc1SAlexander Graf         if (endian == DEVICE_LITTLE_ENDIAN) {
42671e78bcc1SAlexander Graf             val = bswap32(val);
42681e78bcc1SAlexander Graf         }
42691e78bcc1SAlexander Graf #else
42701e78bcc1SAlexander Graf         if (endian == DEVICE_BIG_ENDIAN) {
42711e78bcc1SAlexander Graf             val = bswap32(val);
42721e78bcc1SAlexander Graf         }
42731e78bcc1SAlexander Graf #endif
42748df1cd07Sbellard     } else {
42758df1cd07Sbellard         /* RAM case */
42765579c7f3Spbrook         ptr = qemu_get_ram_ptr(pd & TARGET_PAGE_MASK) +
42778df1cd07Sbellard             (addr & ~TARGET_PAGE_MASK);
42781e78bcc1SAlexander Graf         switch (endian) {
42791e78bcc1SAlexander Graf         case DEVICE_LITTLE_ENDIAN:
42801e78bcc1SAlexander Graf             val = ldl_le_p(ptr);
42811e78bcc1SAlexander Graf             break;
42821e78bcc1SAlexander Graf         case DEVICE_BIG_ENDIAN:
42831e78bcc1SAlexander Graf             val = ldl_be_p(ptr);
42841e78bcc1SAlexander Graf             break;
42851e78bcc1SAlexander Graf         default:
42868df1cd07Sbellard             val = ldl_p(ptr);
42871e78bcc1SAlexander Graf             break;
42881e78bcc1SAlexander Graf         }
42898df1cd07Sbellard     }
42908df1cd07Sbellard     return val;
42918df1cd07Sbellard }
42928df1cd07Sbellard 
42931e78bcc1SAlexander Graf uint32_t ldl_phys(target_phys_addr_t addr)
42941e78bcc1SAlexander Graf {
42951e78bcc1SAlexander Graf     return ldl_phys_internal(addr, DEVICE_NATIVE_ENDIAN);
42961e78bcc1SAlexander Graf }
42971e78bcc1SAlexander Graf 
42981e78bcc1SAlexander Graf uint32_t ldl_le_phys(target_phys_addr_t addr)
42991e78bcc1SAlexander Graf {
43001e78bcc1SAlexander Graf     return ldl_phys_internal(addr, DEVICE_LITTLE_ENDIAN);
43011e78bcc1SAlexander Graf }
43021e78bcc1SAlexander Graf 
43031e78bcc1SAlexander Graf uint32_t ldl_be_phys(target_phys_addr_t addr)
43041e78bcc1SAlexander Graf {
43051e78bcc1SAlexander Graf     return ldl_phys_internal(addr, DEVICE_BIG_ENDIAN);
43061e78bcc1SAlexander Graf }
43071e78bcc1SAlexander Graf 
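/* Illustrative sketch (hypothetical device, not from this file): a device
   model whose in-guest-memory descriptors are specified as little-endian
   regardless of the target CPU would use the _le_ accessors above: */
#if 0
static void example_read_desc(target_phys_addr_t base,
                              uint32_t *buf_addr, uint32_t *buf_len)
{
    *buf_addr = ldl_le_phys(base);        /* always little-endian */
    *buf_len  = ldl_le_phys(base + 4);
}
#endif
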
430884b7b8e7Sbellard /* warning: addr must be aligned */
43091e78bcc1SAlexander Graf static inline uint64_t ldq_phys_internal(target_phys_addr_t addr,
43101e78bcc1SAlexander Graf                                          enum device_endian endian)
431184b7b8e7Sbellard {
431284b7b8e7Sbellard     int io_index;
431384b7b8e7Sbellard     uint8_t *ptr;
431484b7b8e7Sbellard     uint64_t val;
431584b7b8e7Sbellard     unsigned long pd;
431684b7b8e7Sbellard     PhysPageDesc *p;
431784b7b8e7Sbellard 
431884b7b8e7Sbellard     p = phys_page_find(addr >> TARGET_PAGE_BITS);
431984b7b8e7Sbellard     if (!p) {
432084b7b8e7Sbellard         pd = IO_MEM_UNASSIGNED;
432184b7b8e7Sbellard     } else {
432284b7b8e7Sbellard         pd = p->phys_offset;
432384b7b8e7Sbellard     }
432484b7b8e7Sbellard 
43252a4188a3Sbellard     if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
43262a4188a3Sbellard         !(pd & IO_MEM_ROMD)) {
432784b7b8e7Sbellard         /* I/O case */
432884b7b8e7Sbellard         io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
43298da3ff18Spbrook         if (p)
43308da3ff18Spbrook             addr = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
43311e78bcc1SAlexander Graf 
        /* The 64-bit access is performed as two 32-bit device reads;
           mirror the endian handling of the 32-bit accessor above by
           byte-swapping the combined value when the requested device
           endianness differs from the target's. */
433484b7b8e7Sbellard #ifdef TARGET_WORDS_BIGENDIAN
433584b7b8e7Sbellard         val = (uint64_t)io_mem_read[io_index][2](io_mem_opaque[io_index], addr) << 32;
433684b7b8e7Sbellard         val |= io_mem_read[io_index][2](io_mem_opaque[io_index], addr + 4);
        if (endian == DEVICE_LITTLE_ENDIAN) {
            val = bswap64(val);
        }
433784b7b8e7Sbellard #else
433884b7b8e7Sbellard         val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr);
433984b7b8e7Sbellard         val |= (uint64_t)io_mem_read[io_index][2](io_mem_opaque[io_index], addr + 4) << 32;
        if (endian == DEVICE_BIG_ENDIAN) {
            val = bswap64(val);
        }
434084b7b8e7Sbellard #endif
434184b7b8e7Sbellard     } else {
434284b7b8e7Sbellard         /* RAM case */
43435579c7f3Spbrook         ptr = qemu_get_ram_ptr(pd & TARGET_PAGE_MASK) +
434484b7b8e7Sbellard             (addr & ~TARGET_PAGE_MASK);
43451e78bcc1SAlexander Graf         switch (endian) {
43461e78bcc1SAlexander Graf         case DEVICE_LITTLE_ENDIAN:
43471e78bcc1SAlexander Graf             val = ldq_le_p(ptr);
43481e78bcc1SAlexander Graf             break;
43491e78bcc1SAlexander Graf         case DEVICE_BIG_ENDIAN:
43501e78bcc1SAlexander Graf             val = ldq_be_p(ptr);
43511e78bcc1SAlexander Graf             break;
43521e78bcc1SAlexander Graf         default:
435384b7b8e7Sbellard             val = ldq_p(ptr);
43541e78bcc1SAlexander Graf             break;
43551e78bcc1SAlexander Graf         }
435684b7b8e7Sbellard     }
435784b7b8e7Sbellard     return val;
435884b7b8e7Sbellard }
435984b7b8e7Sbellard 
43601e78bcc1SAlexander Graf uint64_t ldq_phys(target_phys_addr_t addr)
43611e78bcc1SAlexander Graf {
43621e78bcc1SAlexander Graf     return ldq_phys_internal(addr, DEVICE_NATIVE_ENDIAN);
43631e78bcc1SAlexander Graf }
43641e78bcc1SAlexander Graf 
43651e78bcc1SAlexander Graf uint64_t ldq_le_phys(target_phys_addr_t addr)
43661e78bcc1SAlexander Graf {
43671e78bcc1SAlexander Graf     return ldq_phys_internal(addr, DEVICE_LITTLE_ENDIAN);
43681e78bcc1SAlexander Graf }
43691e78bcc1SAlexander Graf 
43701e78bcc1SAlexander Graf uint64_t ldq_be_phys(target_phys_addr_t addr)
43711e78bcc1SAlexander Graf {
43721e78bcc1SAlexander Graf     return ldq_phys_internal(addr, DEVICE_BIG_ENDIAN);
43731e78bcc1SAlexander Graf }
43741e78bcc1SAlexander Graf 
4375aab33094Sbellard /* XXX: optimize */
4376c227f099SAnthony Liguori uint32_t ldub_phys(target_phys_addr_t addr)
4377aab33094Sbellard {
4378aab33094Sbellard     uint8_t val;
4379aab33094Sbellard     cpu_physical_memory_read(addr, &val, 1);
4380aab33094Sbellard     return val;
4381aab33094Sbellard }
4382aab33094Sbellard 
4383733f0b02SMichael S. Tsirkin /* warning: addr must be aligned */
43841e78bcc1SAlexander Graf static inline uint32_t lduw_phys_internal(target_phys_addr_t addr,
43851e78bcc1SAlexander Graf                                           enum device_endian endian)
4386aab33094Sbellard {
4387733f0b02SMichael S. Tsirkin     int io_index;
4388733f0b02SMichael S. Tsirkin     uint8_t *ptr;
4389733f0b02SMichael S. Tsirkin     uint32_t val;
4390733f0b02SMichael S. Tsirkin     unsigned long pd;
4391733f0b02SMichael S. Tsirkin     PhysPageDesc *p;
4392733f0b02SMichael S. Tsirkin 
4393733f0b02SMichael S. Tsirkin     p = phys_page_find(addr >> TARGET_PAGE_BITS);
4394733f0b02SMichael S. Tsirkin     if (!p) {
4395733f0b02SMichael S. Tsirkin         pd = IO_MEM_UNASSIGNED;
4396733f0b02SMichael S. Tsirkin     } else {
4397733f0b02SMichael S. Tsirkin         pd = p->phys_offset;
4398733f0b02SMichael S. Tsirkin     }
4399733f0b02SMichael S. Tsirkin 
4400733f0b02SMichael S. Tsirkin     if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
4401733f0b02SMichael S. Tsirkin         !(pd & IO_MEM_ROMD)) {
4402733f0b02SMichael S. Tsirkin         /* I/O case */
4403733f0b02SMichael S. Tsirkin         io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
4404733f0b02SMichael S. Tsirkin         if (p)
4405733f0b02SMichael S. Tsirkin             addr = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
4406733f0b02SMichael S. Tsirkin         val = io_mem_read[io_index][1](io_mem_opaque[io_index], addr);
44071e78bcc1SAlexander Graf #if defined(TARGET_WORDS_BIGENDIAN)
44081e78bcc1SAlexander Graf         if (endian == DEVICE_LITTLE_ENDIAN) {
44091e78bcc1SAlexander Graf             val = bswap16(val);
44101e78bcc1SAlexander Graf         }
44111e78bcc1SAlexander Graf #else
44121e78bcc1SAlexander Graf         if (endian == DEVICE_BIG_ENDIAN) {
44131e78bcc1SAlexander Graf             val = bswap16(val);
44141e78bcc1SAlexander Graf         }
44151e78bcc1SAlexander Graf #endif
4416733f0b02SMichael S. Tsirkin     } else {
4417733f0b02SMichael S. Tsirkin         /* RAM case */
4418733f0b02SMichael S. Tsirkin         ptr = qemu_get_ram_ptr(pd & TARGET_PAGE_MASK) +
4419733f0b02SMichael S. Tsirkin             (addr & ~TARGET_PAGE_MASK);
44201e78bcc1SAlexander Graf         switch (endian) {
44211e78bcc1SAlexander Graf         case DEVICE_LITTLE_ENDIAN:
44221e78bcc1SAlexander Graf             val = lduw_le_p(ptr);
44231e78bcc1SAlexander Graf             break;
44241e78bcc1SAlexander Graf         case DEVICE_BIG_ENDIAN:
44251e78bcc1SAlexander Graf             val = lduw_be_p(ptr);
44261e78bcc1SAlexander Graf             break;
44271e78bcc1SAlexander Graf         default:
4428733f0b02SMichael S. Tsirkin             val = lduw_p(ptr);
44291e78bcc1SAlexander Graf             break;
44301e78bcc1SAlexander Graf         }
4431733f0b02SMichael S. Tsirkin     }
4432733f0b02SMichael S. Tsirkin     return val;
4433aab33094Sbellard }
4434aab33094Sbellard 
44351e78bcc1SAlexander Graf uint32_t lduw_phys(target_phys_addr_t addr)
44361e78bcc1SAlexander Graf {
44371e78bcc1SAlexander Graf     return lduw_phys_internal(addr, DEVICE_NATIVE_ENDIAN);
44381e78bcc1SAlexander Graf }
44391e78bcc1SAlexander Graf 
44401e78bcc1SAlexander Graf uint32_t lduw_le_phys(target_phys_addr_t addr)
44411e78bcc1SAlexander Graf {
44421e78bcc1SAlexander Graf     return lduw_phys_internal(addr, DEVICE_LITTLE_ENDIAN);
44431e78bcc1SAlexander Graf }
44441e78bcc1SAlexander Graf 
44451e78bcc1SAlexander Graf uint32_t lduw_be_phys(target_phys_addr_t addr)
44461e78bcc1SAlexander Graf {
44471e78bcc1SAlexander Graf     return lduw_phys_internal(addr, DEVICE_BIG_ENDIAN);
44481e78bcc1SAlexander Graf }
44491e78bcc1SAlexander Graf 
44508df1cd07Sbellard /* warning: addr must be aligned. The ram page is not marked as dirty
44518df1cd07Sbellard    and the code inside is not invalidated. It is useful if the dirty
44528df1cd07Sbellard    bits are used to track modified PTEs */
4453c227f099SAnthony Liguori void stl_phys_notdirty(target_phys_addr_t addr, uint32_t val)
44548df1cd07Sbellard {
44558df1cd07Sbellard     int io_index;
44568df1cd07Sbellard     uint8_t *ptr;
44578df1cd07Sbellard     unsigned long pd;
44588df1cd07Sbellard     PhysPageDesc *p;
44598df1cd07Sbellard 
44608df1cd07Sbellard     p = phys_page_find(addr >> TARGET_PAGE_BITS);
44618df1cd07Sbellard     if (!p) {
44628df1cd07Sbellard         pd = IO_MEM_UNASSIGNED;
44638df1cd07Sbellard     } else {
44648df1cd07Sbellard         pd = p->phys_offset;
44658df1cd07Sbellard     }
44668df1cd07Sbellard 
44673a7d929eSbellard     if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
44688df1cd07Sbellard         io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
44698da3ff18Spbrook         if (p)
44708da3ff18Spbrook             addr = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
44718df1cd07Sbellard         io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
44728df1cd07Sbellard     } else {
447374576198Saliguori         unsigned long addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
44745579c7f3Spbrook         ptr = qemu_get_ram_ptr(addr1);
44758df1cd07Sbellard         stl_p(ptr, val);
447674576198Saliguori 
447774576198Saliguori         if (unlikely(in_migration)) {
447874576198Saliguori             if (!cpu_physical_memory_is_dirty(addr1)) {
447974576198Saliguori                 /* invalidate code */
448074576198Saliguori                 tb_invalidate_phys_page_range(addr1, addr1 + 4, 0);
448174576198Saliguori                 /* set dirty bit */
4482f7c11b53SYoshiaki Tamura                 cpu_physical_memory_set_dirty_flags(
4483f7c11b53SYoshiaki Tamura                     addr1, (0xff & ~CODE_DIRTY_FLAG));
448474576198Saliguori             }
448574576198Saliguori         }
44868df1cd07Sbellard     }
44878df1cd07Sbellard }
44888df1cd07Sbellard 
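/* Illustrative sketch (hypothetical helper and bit layout, not from this
   file): a software page-table walker setting an ACCESSED bit in a guest
   PTE.  stl_phys_notdirty() is used so that updating the PTE does not
   dirty the page for TB-invalidation purposes. */
#if 0
#define EXAMPLE_PTE_ACCESSED 0x20         /* hypothetical bit position */
static void example_set_pte_accessed(target_phys_addr_t pte_addr)
{
    uint32_t pte = ldl_phys(pte_addr);
    if (!(pte & EXAMPLE_PTE_ACCESSED)) {
        stl_phys_notdirty(pte_addr, pte | EXAMPLE_PTE_ACCESSED);
    }
}
#endif
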
4489c227f099SAnthony Liguori void stq_phys_notdirty(target_phys_addr_t addr, uint64_t val)
4490bc98a7efSj_mayer {
4491bc98a7efSj_mayer     int io_index;
4492bc98a7efSj_mayer     uint8_t *ptr;
4493bc98a7efSj_mayer     unsigned long pd;
4494bc98a7efSj_mayer     PhysPageDesc *p;
4495bc98a7efSj_mayer 
4496bc98a7efSj_mayer     p = phys_page_find(addr >> TARGET_PAGE_BITS);
4497bc98a7efSj_mayer     if (!p) {
4498bc98a7efSj_mayer         pd = IO_MEM_UNASSIGNED;
4499bc98a7efSj_mayer     } else {
4500bc98a7efSj_mayer         pd = p->phys_offset;
4501bc98a7efSj_mayer     }
4502bc98a7efSj_mayer 
4503bc98a7efSj_mayer     if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
4504bc98a7efSj_mayer         io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
45058da3ff18Spbrook         if (p)
45068da3ff18Spbrook             addr = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
4507bc98a7efSj_mayer #ifdef TARGET_WORDS_BIGENDIAN
4508bc98a7efSj_mayer         io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val >> 32);
4509bc98a7efSj_mayer         io_mem_write[io_index][2](io_mem_opaque[io_index], addr + 4, val);
4510bc98a7efSj_mayer #else
4511bc98a7efSj_mayer         io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
4512bc98a7efSj_mayer         io_mem_write[io_index][2](io_mem_opaque[io_index], addr + 4, val >> 32);
4513bc98a7efSj_mayer #endif
4514bc98a7efSj_mayer     } else {
45155579c7f3Spbrook         ptr = qemu_get_ram_ptr(pd & TARGET_PAGE_MASK) +
4516bc98a7efSj_mayer             (addr & ~TARGET_PAGE_MASK);
4517bc98a7efSj_mayer         stq_p(ptr, val);
4518bc98a7efSj_mayer     }
4519bc98a7efSj_mayer }
4520bc98a7efSj_mayer 
45218df1cd07Sbellard /* warning: addr must be aligned */
45221e78bcc1SAlexander Graf static inline void stl_phys_internal(target_phys_addr_t addr, uint32_t val,
45231e78bcc1SAlexander Graf                                      enum device_endian endian)
45248df1cd07Sbellard {
45258df1cd07Sbellard     int io_index;
45268df1cd07Sbellard     uint8_t *ptr;
45278df1cd07Sbellard     unsigned long pd;
45288df1cd07Sbellard     PhysPageDesc *p;
45298df1cd07Sbellard 
45308df1cd07Sbellard     p = phys_page_find(addr >> TARGET_PAGE_BITS);
45318df1cd07Sbellard     if (!p) {
45328df1cd07Sbellard         pd = IO_MEM_UNASSIGNED;
45338df1cd07Sbellard     } else {
45348df1cd07Sbellard         pd = p->phys_offset;
45358df1cd07Sbellard     }
45368df1cd07Sbellard 
45373a7d929eSbellard     if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
45388df1cd07Sbellard         io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
45398da3ff18Spbrook         if (p)
45408da3ff18Spbrook             addr = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
45411e78bcc1SAlexander Graf #if defined(TARGET_WORDS_BIGENDIAN)
45421e78bcc1SAlexander Graf         if (endian == DEVICE_LITTLE_ENDIAN) {
45431e78bcc1SAlexander Graf             val = bswap32(val);
45441e78bcc1SAlexander Graf         }
45451e78bcc1SAlexander Graf #else
45461e78bcc1SAlexander Graf         if (endian == DEVICE_BIG_ENDIAN) {
45471e78bcc1SAlexander Graf             val = bswap32(val);
45481e78bcc1SAlexander Graf         }
45491e78bcc1SAlexander Graf #endif
45508df1cd07Sbellard         io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
45518df1cd07Sbellard     } else {
45528df1cd07Sbellard         unsigned long addr1;
45538df1cd07Sbellard         addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
45548df1cd07Sbellard         /* RAM case */
45555579c7f3Spbrook         ptr = qemu_get_ram_ptr(addr1);
45561e78bcc1SAlexander Graf         switch (endian) {
45571e78bcc1SAlexander Graf         case DEVICE_LITTLE_ENDIAN:
45581e78bcc1SAlexander Graf             stl_le_p(ptr, val);
45591e78bcc1SAlexander Graf             break;
45601e78bcc1SAlexander Graf         case DEVICE_BIG_ENDIAN:
45611e78bcc1SAlexander Graf             stl_be_p(ptr, val);
45621e78bcc1SAlexander Graf             break;
45631e78bcc1SAlexander Graf         default:
45648df1cd07Sbellard             stl_p(ptr, val);
45651e78bcc1SAlexander Graf             break;
45661e78bcc1SAlexander Graf         }
45673a7d929eSbellard         if (!cpu_physical_memory_is_dirty(addr1)) {
45688df1cd07Sbellard             /* invalidate code */
45698df1cd07Sbellard             tb_invalidate_phys_page_range(addr1, addr1 + 4, 0);
45708df1cd07Sbellard             /* set dirty bit */
4571f7c11b53SYoshiaki Tamura             cpu_physical_memory_set_dirty_flags(addr1,
4572f7c11b53SYoshiaki Tamura                 (0xff & ~CODE_DIRTY_FLAG));
45738df1cd07Sbellard         }
45748df1cd07Sbellard     }
45753a7d929eSbellard }
45768df1cd07Sbellard 
45771e78bcc1SAlexander Graf void stl_phys(target_phys_addr_t addr, uint32_t val)
45781e78bcc1SAlexander Graf {
45791e78bcc1SAlexander Graf     stl_phys_internal(addr, val, DEVICE_NATIVE_ENDIAN);
45801e78bcc1SAlexander Graf }
45811e78bcc1SAlexander Graf 
45821e78bcc1SAlexander Graf void stl_le_phys(target_phys_addr_t addr, uint32_t val)
45831e78bcc1SAlexander Graf {
45841e78bcc1SAlexander Graf     stl_phys_internal(addr, val, DEVICE_LITTLE_ENDIAN);
45851e78bcc1SAlexander Graf }
45861e78bcc1SAlexander Graf 
45871e78bcc1SAlexander Graf void stl_be_phys(target_phys_addr_t addr, uint32_t val)
45881e78bcc1SAlexander Graf {
45891e78bcc1SAlexander Graf     stl_phys_internal(addr, val, DEVICE_BIG_ENDIAN);
45901e78bcc1SAlexander Graf }
45911e78bcc1SAlexander Graf 
4592aab33094Sbellard /* XXX: optimize */
4593c227f099SAnthony Liguori void stb_phys(target_phys_addr_t addr, uint32_t val)
4594aab33094Sbellard {
4595aab33094Sbellard     uint8_t v = val;
4596aab33094Sbellard     cpu_physical_memory_write(addr, &v, 1);
4597aab33094Sbellard }
4598aab33094Sbellard 
4599733f0b02SMichael S. Tsirkin /* warning: addr must be aligned */
46001e78bcc1SAlexander Graf static inline void stw_phys_internal(target_phys_addr_t addr, uint32_t val,
46011e78bcc1SAlexander Graf                                      enum device_endian endian)
4602aab33094Sbellard {
4603733f0b02SMichael S. Tsirkin     int io_index;
4604733f0b02SMichael S. Tsirkin     uint8_t *ptr;
4605733f0b02SMichael S. Tsirkin     unsigned long pd;
4606733f0b02SMichael S. Tsirkin     PhysPageDesc *p;
4607733f0b02SMichael S. Tsirkin 
4608733f0b02SMichael S. Tsirkin     p = phys_page_find(addr >> TARGET_PAGE_BITS);
4609733f0b02SMichael S. Tsirkin     if (!p) {
4610733f0b02SMichael S. Tsirkin         pd = IO_MEM_UNASSIGNED;
4611733f0b02SMichael S. Tsirkin     } else {
4612733f0b02SMichael S. Tsirkin         pd = p->phys_offset;
4613733f0b02SMichael S. Tsirkin     }
4614733f0b02SMichael S. Tsirkin 
4615733f0b02SMichael S. Tsirkin     if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
4616733f0b02SMichael S. Tsirkin         io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
4617733f0b02SMichael S. Tsirkin         if (p)
4618733f0b02SMichael S. Tsirkin             addr = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
46191e78bcc1SAlexander Graf #if defined(TARGET_WORDS_BIGENDIAN)
46201e78bcc1SAlexander Graf         if (endian == DEVICE_LITTLE_ENDIAN) {
46211e78bcc1SAlexander Graf             val = bswap16(val);
46221e78bcc1SAlexander Graf         }
46231e78bcc1SAlexander Graf #else
46241e78bcc1SAlexander Graf         if (endian == DEVICE_BIG_ENDIAN) {
46251e78bcc1SAlexander Graf             val = bswap16(val);
46261e78bcc1SAlexander Graf         }
46271e78bcc1SAlexander Graf #endif
4628733f0b02SMichael S. Tsirkin         io_mem_write[io_index][1](io_mem_opaque[io_index], addr, val);
4629733f0b02SMichael S. Tsirkin     } else {
4630733f0b02SMichael S. Tsirkin         unsigned long addr1;
4631733f0b02SMichael S. Tsirkin         addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
4632733f0b02SMichael S. Tsirkin         /* RAM case */
4633733f0b02SMichael S. Tsirkin         ptr = qemu_get_ram_ptr(addr1);
46341e78bcc1SAlexander Graf         switch (endian) {
46351e78bcc1SAlexander Graf         case DEVICE_LITTLE_ENDIAN:
46361e78bcc1SAlexander Graf             stw_le_p(ptr, val);
46371e78bcc1SAlexander Graf             break;
46381e78bcc1SAlexander Graf         case DEVICE_BIG_ENDIAN:
46391e78bcc1SAlexander Graf             stw_be_p(ptr, val);
46401e78bcc1SAlexander Graf             break;
46411e78bcc1SAlexander Graf         default:
4642733f0b02SMichael S. Tsirkin             stw_p(ptr, val);
46431e78bcc1SAlexander Graf             break;
46441e78bcc1SAlexander Graf         }
4645733f0b02SMichael S. Tsirkin         if (!cpu_physical_memory_is_dirty(addr1)) {
4646733f0b02SMichael S. Tsirkin             /* invalidate code */
4647733f0b02SMichael S. Tsirkin             tb_invalidate_phys_page_range(addr1, addr1 + 2, 0);
4648733f0b02SMichael S. Tsirkin             /* set dirty bit */
4649733f0b02SMichael S. Tsirkin             cpu_physical_memory_set_dirty_flags(addr1,
4650733f0b02SMichael S. Tsirkin                 (0xff & ~CODE_DIRTY_FLAG));
4651733f0b02SMichael S. Tsirkin         }
4652733f0b02SMichael S. Tsirkin     }
4653aab33094Sbellard }
4654aab33094Sbellard 
46551e78bcc1SAlexander Graf void stw_phys(target_phys_addr_t addr, uint32_t val)
46561e78bcc1SAlexander Graf {
46571e78bcc1SAlexander Graf     stw_phys_internal(addr, val, DEVICE_NATIVE_ENDIAN);
46581e78bcc1SAlexander Graf }
46591e78bcc1SAlexander Graf 
46601e78bcc1SAlexander Graf void stw_le_phys(target_phys_addr_t addr, uint32_t val)
46611e78bcc1SAlexander Graf {
46621e78bcc1SAlexander Graf     stw_phys_internal(addr, val, DEVICE_LITTLE_ENDIAN);
46631e78bcc1SAlexander Graf }
46641e78bcc1SAlexander Graf 
46651e78bcc1SAlexander Graf void stw_be_phys(target_phys_addr_t addr, uint32_t val)
46661e78bcc1SAlexander Graf {
46671e78bcc1SAlexander Graf     stw_phys_internal(addr, val, DEVICE_BIG_ENDIAN);
46681e78bcc1SAlexander Graf }
46691e78bcc1SAlexander Graf 
4670aab33094Sbellard /* XXX: optimize */
4671c227f099SAnthony Liguori void stq_phys(target_phys_addr_t addr, uint64_t val)
4672aab33094Sbellard {
4673aab33094Sbellard     val = tswap64(val);
467471d2b725SStefan Weil     cpu_physical_memory_write(addr, &val, 8);
4675aab33094Sbellard }
4676aab33094Sbellard 
46771e78bcc1SAlexander Graf void stq_le_phys(target_phys_addr_t addr, uint64_t val)
46781e78bcc1SAlexander Graf {
46791e78bcc1SAlexander Graf     val = cpu_to_le64(val);
46801e78bcc1SAlexander Graf     cpu_physical_memory_write(addr, &val, 8);
46811e78bcc1SAlexander Graf }
46821e78bcc1SAlexander Graf 
46831e78bcc1SAlexander Graf void stq_be_phys(target_phys_addr_t addr, uint64_t val)
46841e78bcc1SAlexander Graf {
46851e78bcc1SAlexander Graf     val = cpu_to_be64(val);
46861e78bcc1SAlexander Graf     cpu_physical_memory_write(addr, &val, 8);
46871e78bcc1SAlexander Graf }
46881e78bcc1SAlexander Graf 
46895e2972fdSaliguori /* virtual memory access for debug (includes writing to ROM) */
4690b448f2f3Sbellard int cpu_memory_rw_debug(CPUState *env, target_ulong addr,
4691b448f2f3Sbellard                         uint8_t *buf, int len, int is_write)
469213eb76e0Sbellard {
469313eb76e0Sbellard     int l;
4694c227f099SAnthony Liguori     target_phys_addr_t phys_addr;
46959b3c35e0Sj_mayer     target_ulong page;
469613eb76e0Sbellard 
469713eb76e0Sbellard     while (len > 0) {
469813eb76e0Sbellard         page = addr & TARGET_PAGE_MASK;
469913eb76e0Sbellard         phys_addr = cpu_get_phys_page_debug(env, page);
470013eb76e0Sbellard         /* if no physical page mapped, return an error */
470113eb76e0Sbellard         if (phys_addr == -1)
470213eb76e0Sbellard             return -1;
470313eb76e0Sbellard         l = (page + TARGET_PAGE_SIZE) - addr;
470413eb76e0Sbellard         if (l > len)
470513eb76e0Sbellard             l = len;
47065e2972fdSaliguori         phys_addr += (addr & ~TARGET_PAGE_MASK);
47075e2972fdSaliguori         if (is_write)
47085e2972fdSaliguori             cpu_physical_memory_write_rom(phys_addr, buf, l);
47095e2972fdSaliguori         else
47105e2972fdSaliguori             cpu_physical_memory_rw(phys_addr, buf, l, is_write);
471113eb76e0Sbellard         len -= l;
471213eb76e0Sbellard         buf += l;
471313eb76e0Sbellard         addr += l;
471413eb76e0Sbellard     }
471513eb76e0Sbellard     return 0;
471613eb76e0Sbellard }
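
/* Illustrative sketch (hypothetical caller, not from this file): a debugger
   stub servicing a memory-read request for a guest virtual address range.
   A return value of -1 means some page in the range was unmapped. */
#if 0
static int example_debug_read(CPUState *env, target_ulong vaddr,
                              uint8_t *out, int size)
{
    return cpu_memory_rw_debug(env, vaddr, out, size, 0 /* read */);
}
#endif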
4717a68fe89cSPaul Brook #endif
471813eb76e0Sbellard 
47192e70f6efSpbrook /* in deterministic execution mode, instructions doing device I/O
47202e70f6efSpbrook    must be at the end of the TB */
47212e70f6efSpbrook void cpu_io_recompile(CPUState *env, void *retaddr)
47222e70f6efSpbrook {
47232e70f6efSpbrook     TranslationBlock *tb;
47242e70f6efSpbrook     uint32_t n, cflags;
47252e70f6efSpbrook     target_ulong pc, cs_base;
47262e70f6efSpbrook     uint64_t flags;
47272e70f6efSpbrook 
47282e70f6efSpbrook     tb = tb_find_pc((unsigned long)retaddr);
47292e70f6efSpbrook     if (!tb) {
47302e70f6efSpbrook         cpu_abort(env, "cpu_io_recompile: could not find TB for pc=%p",
47312e70f6efSpbrook                   retaddr);
47322e70f6efSpbrook     }
47332e70f6efSpbrook     n = env->icount_decr.u16.low + tb->icount;
4734618ba8e6SStefan Weil     cpu_restore_state(tb, env, (unsigned long)retaddr);
47352e70f6efSpbrook     /* Calculate how many instructions had been executed before the fault
4736bf20dc07Sths        occurred.  */
47372e70f6efSpbrook     n = n - env->icount_decr.u16.low;
47382e70f6efSpbrook     /* Generate a new TB ending on the I/O insn.  */
47392e70f6efSpbrook     n++;
47402e70f6efSpbrook     /* On MIPS and SH, delay slot instructions can only be restarted if
47412e70f6efSpbrook        they were already the first instruction in the TB.  If this is not
4742bf20dc07Sths        the first instruction in a TB then re-execute the preceding
47432e70f6efSpbrook        branch.  */
47442e70f6efSpbrook #if defined(TARGET_MIPS)
47452e70f6efSpbrook     if ((env->hflags & MIPS_HFLAG_BMASK) != 0 && n > 1) {
47462e70f6efSpbrook         env->active_tc.PC -= 4;
47472e70f6efSpbrook         env->icount_decr.u16.low++;
47482e70f6efSpbrook         env->hflags &= ~MIPS_HFLAG_BMASK;
47492e70f6efSpbrook     }
47502e70f6efSpbrook #elif defined(TARGET_SH4)
47512e70f6efSpbrook     if ((env->flags & ((DELAY_SLOT | DELAY_SLOT_CONDITIONAL))) != 0
47522e70f6efSpbrook             && n > 1) {
47532e70f6efSpbrook         env->pc -= 2;
47542e70f6efSpbrook         env->icount_decr.u16.low++;
47552e70f6efSpbrook         env->flags &= ~(DELAY_SLOT | DELAY_SLOT_CONDITIONAL);
47562e70f6efSpbrook     }
47572e70f6efSpbrook #endif
47582e70f6efSpbrook     /* This should never happen.  */
47592e70f6efSpbrook     if (n > CF_COUNT_MASK)
47602e70f6efSpbrook         cpu_abort(env, "TB too big during recompile");
47612e70f6efSpbrook 
47622e70f6efSpbrook     cflags = n | CF_LAST_IO;
47632e70f6efSpbrook     pc = tb->pc;
47642e70f6efSpbrook     cs_base = tb->cs_base;
47652e70f6efSpbrook     flags = tb->flags;
47662e70f6efSpbrook     tb_phys_invalidate(tb, -1);
47672e70f6efSpbrook     /* FIXME: In theory this could raise an exception.  In practice
47682e70f6efSpbrook        we have already translated the block once so it's probably ok.  */
47692e70f6efSpbrook     tb_gen_code(env, pc, cs_base, flags, cflags);
4770bf20dc07Sths     /* TODO: If env->pc != tb->pc (i.e. the faulting instruction was not
47712e70f6efSpbrook        the first in the TB) then we end up generating a whole new TB and
47722e70f6efSpbrook        repeating the fault, which is horribly inefficient.
47732e70f6efSpbrook        Better would be to execute just this insn uncached, or generate a
47742e70f6efSpbrook        second new TB.  */
47752e70f6efSpbrook     cpu_resume_from_signal(env, NULL);
47762e70f6efSpbrook }
47772e70f6efSpbrook 
4778b3755a91SPaul Brook #if !defined(CONFIG_USER_ONLY)
4779b3755a91SPaul Brook 
4780055403b2SStefan Weil void dump_exec_info(FILE *f, fprintf_function cpu_fprintf)
4781e3db7226Sbellard {
4782e3db7226Sbellard     int i, target_code_size, max_target_code_size;
4783e3db7226Sbellard     int direct_jmp_count, direct_jmp2_count, cross_page;
4784e3db7226Sbellard     TranslationBlock *tb;
4785e3db7226Sbellard 
4786e3db7226Sbellard     target_code_size = 0;
4787e3db7226Sbellard     max_target_code_size = 0;
4788e3db7226Sbellard     cross_page = 0;
4789e3db7226Sbellard     direct_jmp_count = 0;
4790e3db7226Sbellard     direct_jmp2_count = 0;
4791e3db7226Sbellard     for(i = 0; i < nb_tbs; i++) {
4792e3db7226Sbellard         tb = &tbs[i];
4793e3db7226Sbellard         target_code_size += tb->size;
4794e3db7226Sbellard         if (tb->size > max_target_code_size)
4795e3db7226Sbellard             max_target_code_size = tb->size;
4796e3db7226Sbellard         if (tb->page_addr[1] != -1)
4797e3db7226Sbellard             cross_page++;
4798e3db7226Sbellard         if (tb->tb_next_offset[0] != 0xffff) {
4799e3db7226Sbellard             direct_jmp_count++;
4800e3db7226Sbellard             if (tb->tb_next_offset[1] != 0xffff) {
4801e3db7226Sbellard                 direct_jmp2_count++;
4802e3db7226Sbellard             }
4803e3db7226Sbellard         }
4804e3db7226Sbellard     }
4805e3db7226Sbellard     /* XXX: avoid using doubles ? */
480657fec1feSbellard     cpu_fprintf(f, "Translation buffer state:\n");
4807055403b2SStefan Weil     cpu_fprintf(f, "gen code size       %td/%ld\n",
480826a5f13bSbellard                 code_gen_ptr - code_gen_buffer, code_gen_buffer_max_size);
480926a5f13bSbellard     cpu_fprintf(f, "TB count            %d/%d\n",
481026a5f13bSbellard                 nb_tbs, code_gen_max_blocks);
4811e3db7226Sbellard     cpu_fprintf(f, "TB avg target size  %d max=%d bytes\n",
4812e3db7226Sbellard                 nb_tbs ? target_code_size / nb_tbs : 0,
4813e3db7226Sbellard                 max_target_code_size);
4814055403b2SStefan Weil     cpu_fprintf(f, "TB avg host size    %td bytes (expansion ratio: %0.1f)\n",
4815e3db7226Sbellard                 nb_tbs ? (code_gen_ptr - code_gen_buffer) / nb_tbs : 0,
4816e3db7226Sbellard                 target_code_size ? (double) (code_gen_ptr - code_gen_buffer) / target_code_size : 0);
4817e3db7226Sbellard     cpu_fprintf(f, "cross page TB count %d (%d%%)\n",
4818e3db7226Sbellard             cross_page,
4819e3db7226Sbellard             nb_tbs ? (cross_page * 100) / nb_tbs : 0);
4820e3db7226Sbellard     cpu_fprintf(f, "direct jump count   %d (%d%%) (2 jumps=%d %d%%)\n",
4821e3db7226Sbellard                 direct_jmp_count,
4822e3db7226Sbellard                 nb_tbs ? (direct_jmp_count * 100) / nb_tbs : 0,
4823e3db7226Sbellard                 direct_jmp2_count,
4824e3db7226Sbellard                 nb_tbs ? (direct_jmp2_count * 100) / nb_tbs : 0);
482557fec1feSbellard     cpu_fprintf(f, "\nStatistics:\n");
4826e3db7226Sbellard     cpu_fprintf(f, "TB flush count      %d\n", tb_flush_count);
4827e3db7226Sbellard     cpu_fprintf(f, "TB invalidate count %d\n", tb_phys_invalidate_count);
4828e3db7226Sbellard     cpu_fprintf(f, "TLB flush count     %d\n", tlb_flush_count);
4829b67d9a52Sbellard     tcg_dump_info(f, cpu_fprintf);
4830e3db7226Sbellard }
4831e3db7226Sbellard 
483261382a50Sbellard #define MMUSUFFIX _cmmu
48333917149dSBlue Swirl #undef GETPC
483461382a50Sbellard #define GETPC() NULL
483561382a50Sbellard #define env cpu_single_env
4836b769d8feSbellard #define SOFTMMU_CODE_ACCESS
483761382a50Sbellard 
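/* Instantiate the softmmu load template once per access size: SHIFT n
   selects a (1 << n)-byte access, so the four inclusions below generate
   the 1-, 2-, 4- and 8-byte "_cmmu" helpers used when the translator
   fetches guest code through the TLB. */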
483861382a50Sbellard #define SHIFT 0
483961382a50Sbellard #include "softmmu_template.h"
484061382a50Sbellard 
484161382a50Sbellard #define SHIFT 1
484261382a50Sbellard #include "softmmu_template.h"
484361382a50Sbellard 
484461382a50Sbellard #define SHIFT 2
484561382a50Sbellard #include "softmmu_template.h"
484661382a50Sbellard 
484761382a50Sbellard #define SHIFT 3
484861382a50Sbellard #include "softmmu_template.h"
484961382a50Sbellard 
485061382a50Sbellard #undef env
485161382a50Sbellard 
485261382a50Sbellard #endif
4853