xref: /qemu/system/physmem.c (revision 6506e4f995967b1a48cc34418c77b318df92ce35)
/*
 *  virtual page mapping and translated block handling
 *
 *  Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
#include "config.h"
#ifdef _WIN32
#include <windows.h>
#else
#include <sys/types.h>
#include <sys/mman.h>
#endif

#include "qemu-common.h"
#include "cpu.h"
#include "exec-all.h"
#include "tcg.h"
#include "hw/hw.h"
#include "hw/qdev.h"
#include "osdep.h"
#include "kvm.h"
#include "hw/xen.h"
#include "qemu-timer.h"
#if defined(CONFIG_USER_ONLY)
#include <qemu.h>
#if defined(__FreeBSD__) || defined(__FreeBSD_kernel__)
#include <sys/param.h>
#if __FreeBSD_version >= 700104
#define HAVE_KINFO_GETVMMAP
#define sigqueue sigqueue_freebsd  /* avoid redefinition */
#include <sys/time.h>
#include <sys/proc.h>
#include <machine/profile.h>
#define _KERNEL
#include <sys/user.h>
#undef _KERNEL
#undef sigqueue
#include <libutil.h>
#endif
#endif
#else /* !CONFIG_USER_ONLY */
#include "xen-mapcache.h"
#include "trace.h"
#endif

//#define DEBUG_TB_INVALIDATE
//#define DEBUG_FLUSH
//#define DEBUG_TLB
//#define DEBUG_UNASSIGNED

/* make various TB consistency checks */
//#define DEBUG_TB_CHECK
//#define DEBUG_TLB_CHECK

//#define DEBUG_IOPORT
//#define DEBUG_SUBPAGE

#if !defined(CONFIG_USER_ONLY)
/* TB consistency checks only implemented for usermode emulation.  */
#undef DEBUG_TB_CHECK
#endif

#define SMC_BITMAP_USE_THRESHOLD 10

static TranslationBlock *tbs;
static int code_gen_max_blocks;
TranslationBlock *tb_phys_hash[CODE_GEN_PHYS_HASH_SIZE];
static int nb_tbs;
/* any access to the tbs or the page table must use this lock */
spinlock_t tb_lock = SPIN_LOCK_UNLOCKED;

#if defined(__arm__) || defined(__sparc_v9__)
/* The prologue must be reachable with a direct jump. ARM and Sparc64
   have limited branch ranges (possibly also PPC), so place it in a
   section close to the code segment. */
#define code_gen_section                                \
    __attribute__((__section__(".gen_code")))           \
    __attribute__((aligned (32)))
#elif defined(_WIN32)
/* Maximum alignment for Win32 is 16. */
#define code_gen_section                                \
    __attribute__((aligned (16)))
#else
#define code_gen_section                                \
    __attribute__((aligned (32)))
#endif

uint8_t code_gen_prologue[1024] code_gen_section;
static uint8_t *code_gen_buffer;
static unsigned long code_gen_buffer_size;
/* threshold to flush the translated code buffer */
static unsigned long code_gen_buffer_max_size;
static uint8_t *code_gen_ptr;

#if !defined(CONFIG_USER_ONLY)
int phys_ram_fd;
static int in_migration;

RAMList ram_list = { .blocks = QLIST_HEAD_INITIALIZER(ram_list) };
#endif

CPUState *first_cpu;
/* current CPU in the current thread. It is only valid inside
   cpu_exec() */
CPUState *cpu_single_env;
/* 0 = Do not count executed instructions.
   1 = Precise instruction counting.
   2 = Adaptive rate instruction counting.  */
int use_icount = 0;
/* Current instruction counter.  While executing translated code this may
   include some instructions that have not yet been executed.  */
int64_t qemu_icount;

typedef struct PageDesc {
    /* list of TBs intersecting this ram page */
    TranslationBlock *first_tb;
    /* in order to optimize self modifying code, we count the number
       of lookups we do to a given page to use a bitmap */
    unsigned int code_write_count;
    uint8_t *code_bitmap;
#if defined(CONFIG_USER_ONLY)
    unsigned long flags;
#endif
} PageDesc;

/* In system mode we want L1_MAP to be based on ram offsets,
   while in user mode we want it to be based on virtual addresses.  */
#if !defined(CONFIG_USER_ONLY)
#if HOST_LONG_BITS < TARGET_PHYS_ADDR_SPACE_BITS
# define L1_MAP_ADDR_SPACE_BITS  HOST_LONG_BITS
#else
# define L1_MAP_ADDR_SPACE_BITS  TARGET_PHYS_ADDR_SPACE_BITS
#endif
#else
# define L1_MAP_ADDR_SPACE_BITS  TARGET_VIRT_ADDR_SPACE_BITS
#endif

/* Size of the L2 (and L3, etc) page tables.  */
#define L2_BITS 10
#define L2_SIZE (1 << L2_BITS)

/* The bits remaining after N lower levels of page tables.  */
#define P_L1_BITS_REM \
    ((TARGET_PHYS_ADDR_SPACE_BITS - TARGET_PAGE_BITS) % L2_BITS)
#define V_L1_BITS_REM \
    ((L1_MAP_ADDR_SPACE_BITS - TARGET_PAGE_BITS) % L2_BITS)

/* Size of the L1 page table.  Avoid silly small sizes.  */
#if P_L1_BITS_REM < 4
#define P_L1_BITS  (P_L1_BITS_REM + L2_BITS)
#else
#define P_L1_BITS  P_L1_BITS_REM
#endif

#if V_L1_BITS_REM < 4
#define V_L1_BITS  (V_L1_BITS_REM + L2_BITS)
#else
#define V_L1_BITS  V_L1_BITS_REM
#endif

#define P_L1_SIZE  ((target_phys_addr_t)1 << P_L1_BITS)
#define V_L1_SIZE  ((target_ulong)1 << V_L1_BITS)

#define P_L1_SHIFT (TARGET_PHYS_ADDR_SPACE_BITS - TARGET_PAGE_BITS - P_L1_BITS)
#define V_L1_SHIFT (L1_MAP_ADDR_SPACE_BITS - TARGET_PAGE_BITS - V_L1_BITS)

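/* Worked example (illustrative, not from the original source): assuming
   L1_MAP_ADDR_SPACE_BITS == 64, TARGET_PAGE_BITS == 12 and L2_BITS == 10,
   V_L1_BITS_REM == (64 - 12) % 10 == 2, which is < 4, so the remainder is
   folded into the top level: V_L1_BITS == 12 and V_L1_SIZE == 4096 entries.
   V_L1_SHIFT == 64 - 12 - 12 == 40, leaving V_L1_SHIFT / L2_BITS == 4
   intermediate levels of 1024 entries each below the L1 table, so a page
   index is consumed as 12 + 10 + 10 + 10 + 10 bits from top to bottom. */
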
unsigned long qemu_real_host_page_size;
unsigned long qemu_host_page_bits;
unsigned long qemu_host_page_size;
unsigned long qemu_host_page_mask;

/* This is a multi-level map on the virtual address space.
   The bottom level has pointers to PageDesc.  */
static void *l1_map[V_L1_SIZE];

#if !defined(CONFIG_USER_ONLY)
typedef struct PhysPageDesc {
    /* offset in host memory of the page + io_index in the low bits */
    ram_addr_t phys_offset;
    ram_addr_t region_offset;
} PhysPageDesc;

/* This is a multi-level map on the physical address space.
   The bottom level has pointers to PhysPageDesc.  */
static void *l1_phys_map[P_L1_SIZE];

static void io_mem_init(void);

/* io memory support */
CPUWriteMemoryFunc *io_mem_write[IO_MEM_NB_ENTRIES][4];
CPUReadMemoryFunc *io_mem_read[IO_MEM_NB_ENTRIES][4];
void *io_mem_opaque[IO_MEM_NB_ENTRIES];
static char io_mem_used[IO_MEM_NB_ENTRIES];
static int io_mem_watch;
#endif

/* log support */
#ifdef WIN32
static const char *logfilename = "qemu.log";
#else
static const char *logfilename = "/tmp/qemu.log";
#endif
FILE *logfile;
int loglevel;
static int log_append = 0;

/* statistics */
#if !defined(CONFIG_USER_ONLY)
static int tlb_flush_count;
#endif
static int tb_flush_count;
static int tb_phys_invalidate_count;

#ifdef _WIN32
static void map_exec(void *addr, long size)
{
    DWORD old_protect;
    VirtualProtect(addr, size,
                   PAGE_EXECUTE_READWRITE, &old_protect);

}
#else
static void map_exec(void *addr, long size)
{
    unsigned long start, end, page_size;

    page_size = getpagesize();
    start = (unsigned long)addr;
    start &= ~(page_size - 1);

    end = (unsigned long)addr + size;
    end += page_size - 1;
    end &= ~(page_size - 1);

    mprotect((void *)start, end - start,
             PROT_READ | PROT_WRITE | PROT_EXEC);
}
#endif

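/* Illustrative example (not part of the original code): with a 4096-byte
   host page, map_exec((void *)0x12345, 0x100) rounds the start down to
   0x12000 and the end up to 0x13000, so the whole page range containing
   the buffer becomes readable, writable and executable. */
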
static void page_init(void)
{
    /* NOTE: we can always suppose that qemu_host_page_size >=
       TARGET_PAGE_SIZE */
#ifdef _WIN32
    {
        SYSTEM_INFO system_info;

        GetSystemInfo(&system_info);
        qemu_real_host_page_size = system_info.dwPageSize;
    }
#else
    qemu_real_host_page_size = getpagesize();
#endif
    if (qemu_host_page_size == 0)
        qemu_host_page_size = qemu_real_host_page_size;
    if (qemu_host_page_size < TARGET_PAGE_SIZE)
        qemu_host_page_size = TARGET_PAGE_SIZE;
    qemu_host_page_bits = 0;
    while ((1 << qemu_host_page_bits) < qemu_host_page_size)
        qemu_host_page_bits++;
    qemu_host_page_mask = ~(qemu_host_page_size - 1);

#if defined(CONFIG_BSD) && defined(CONFIG_USER_ONLY)
    {
#ifdef HAVE_KINFO_GETVMMAP
        struct kinfo_vmentry *freep;
        int i, cnt;

        freep = kinfo_getvmmap(getpid(), &cnt);
        if (freep) {
            mmap_lock();
            for (i = 0; i < cnt; i++) {
                unsigned long startaddr, endaddr;

                startaddr = freep[i].kve_start;
                endaddr = freep[i].kve_end;
                if (h2g_valid(startaddr)) {
                    startaddr = h2g(startaddr) & TARGET_PAGE_MASK;

                    if (h2g_valid(endaddr)) {
                        endaddr = h2g(endaddr);
                        page_set_flags(startaddr, endaddr, PAGE_RESERVED);
                    } else {
#if TARGET_ABI_BITS <= L1_MAP_ADDR_SPACE_BITS
                        endaddr = ~0ul;
                        page_set_flags(startaddr, endaddr, PAGE_RESERVED);
#endif
                    }
                }
            }
            free(freep);
            mmap_unlock();
        }
#else
        FILE *f;

        last_brk = (unsigned long)sbrk(0);

        f = fopen("/compat/linux/proc/self/maps", "r");
        if (f) {
            mmap_lock();

            do {
                unsigned long startaddr, endaddr;
                int n;

                n = fscanf (f, "%lx-%lx %*[^\n]\n", &startaddr, &endaddr);

                if (n == 2 && h2g_valid(startaddr)) {
                    startaddr = h2g(startaddr) & TARGET_PAGE_MASK;

                    if (h2g_valid(endaddr)) {
                        endaddr = h2g(endaddr);
                    } else {
                        endaddr = ~0ul;
                    }
                    page_set_flags(startaddr, endaddr, PAGE_RESERVED);
                }
            } while (!feof(f));

            fclose(f);
            mmap_unlock();
        }
#endif
    }
#endif
}

static PageDesc *page_find_alloc(tb_page_addr_t index, int alloc)
{
    PageDesc *pd;
    void **lp;
    int i;

#if defined(CONFIG_USER_ONLY)
    /* We can't use qemu_malloc because it may recurse into a locked mutex. */
# define ALLOC(P, SIZE)                                 \
    do {                                                \
        P = mmap(NULL, SIZE, PROT_READ | PROT_WRITE,    \
                 MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);   \
    } while (0)
#else
# define ALLOC(P, SIZE) \
    do { P = qemu_mallocz(SIZE); } while (0)
#endif

    /* Level 1.  Always allocated.  */
    lp = l1_map + ((index >> V_L1_SHIFT) & (V_L1_SIZE - 1));

    /* Level 2..N-1.  */
    for (i = V_L1_SHIFT / L2_BITS - 1; i > 0; i--) {
        void **p = *lp;

        if (p == NULL) {
            if (!alloc) {
                return NULL;
            }
            ALLOC(p, sizeof(void *) * L2_SIZE);
            *lp = p;
        }

        lp = p + ((index >> (i * L2_BITS)) & (L2_SIZE - 1));
    }

    pd = *lp;
    if (pd == NULL) {
        if (!alloc) {
            return NULL;
        }
        ALLOC(pd, sizeof(PageDesc) * L2_SIZE);
        *lp = pd;
    }

#undef ALLOC

    return pd + (index & (L2_SIZE - 1));
}

39354936004Sbellard {
3945cd2c5b6SRichard Henderson     return page_find_alloc(index, 0);
39554936004Sbellard }
39654936004Sbellard 
3976d9a1304SPaul Brook #if !defined(CONFIG_USER_ONLY)
398c227f099SAnthony Liguori static PhysPageDesc *phys_page_find_alloc(target_phys_addr_t index, int alloc)
39992e873b9Sbellard {
400e3f4e2a4Spbrook     PhysPageDesc *pd;
4015cd2c5b6SRichard Henderson     void **lp;
402e3f4e2a4Spbrook     int i;
4035cd2c5b6SRichard Henderson 
4045cd2c5b6SRichard Henderson     /* Level 1.  Always allocated.  */
4055cd2c5b6SRichard Henderson     lp = l1_phys_map + ((index >> P_L1_SHIFT) & (P_L1_SIZE - 1));
4065cd2c5b6SRichard Henderson 
4075cd2c5b6SRichard Henderson     /* Level 2..N-1.  */
4085cd2c5b6SRichard Henderson     for (i = P_L1_SHIFT / L2_BITS - 1; i > 0; i--) {
4095cd2c5b6SRichard Henderson         void **p = *lp;
4105cd2c5b6SRichard Henderson         if (p == NULL) {
4115cd2c5b6SRichard Henderson             if (!alloc) {
412108c49b8Sbellard                 return NULL;
4135cd2c5b6SRichard Henderson             }
4145cd2c5b6SRichard Henderson             *lp = p = qemu_mallocz(sizeof(void *) * L2_SIZE);
4155cd2c5b6SRichard Henderson         }
4165cd2c5b6SRichard Henderson         lp = p + ((index >> (i * L2_BITS)) & (L2_SIZE - 1));
4175cd2c5b6SRichard Henderson     }
4185cd2c5b6SRichard Henderson 
4195cd2c5b6SRichard Henderson     pd = *lp;
4205cd2c5b6SRichard Henderson     if (pd == NULL) {
4215cd2c5b6SRichard Henderson         int i;
4225cd2c5b6SRichard Henderson 
4235cd2c5b6SRichard Henderson         if (!alloc) {
4245cd2c5b6SRichard Henderson             return NULL;
4255cd2c5b6SRichard Henderson         }
4265cd2c5b6SRichard Henderson 
4275cd2c5b6SRichard Henderson         *lp = pd = qemu_malloc(sizeof(PhysPageDesc) * L2_SIZE);
4285cd2c5b6SRichard Henderson 
42967c4d23cSpbrook         for (i = 0; i < L2_SIZE; i++) {
430e3f4e2a4Spbrook             pd[i].phys_offset = IO_MEM_UNASSIGNED;
43167c4d23cSpbrook             pd[i].region_offset = (index + i) << TARGET_PAGE_BITS;
43267c4d23cSpbrook         }
43392e873b9Sbellard     }
4345cd2c5b6SRichard Henderson 
4355cd2c5b6SRichard Henderson     return pd + (index & (L2_SIZE - 1));
43692e873b9Sbellard }
43792e873b9Sbellard 
438c227f099SAnthony Liguori static inline PhysPageDesc *phys_page_find(target_phys_addr_t index)
43992e873b9Sbellard {
440108c49b8Sbellard     return phys_page_find_alloc(index, 0);
44192e873b9Sbellard }
44292e873b9Sbellard 
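/* Typical caller pattern (illustrative sketch, not original code): an
   unpopulated physical page has no leaf entry, so a NULL result is treated
   as unassigned I/O memory:

       PhysPageDesc *p = phys_page_find(paddr >> TARGET_PAGE_BITS);
       ram_addr_t pd = p ? p->phys_offset : IO_MEM_UNASSIGNED;
*/
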
static void tlb_protect_code(ram_addr_t ram_addr);
static void tlb_unprotect_code_phys(CPUState *env, ram_addr_t ram_addr,
                                    target_ulong vaddr);
#define mmap_lock() do { } while(0)
#define mmap_unlock() do { } while(0)
#endif

#define DEFAULT_CODE_GEN_BUFFER_SIZE (32 * 1024 * 1024)

#if defined(CONFIG_USER_ONLY)
/* Currently it is not recommended to allocate big chunks of data in
   user mode. This will change when a dedicated libc is used. */
#define USE_STATIC_CODE_GEN_BUFFER
#endif

#ifdef USE_STATIC_CODE_GEN_BUFFER
static uint8_t static_code_gen_buffer[DEFAULT_CODE_GEN_BUFFER_SIZE]
               __attribute__((aligned (CODE_GEN_ALIGN)));
#endif

static void code_gen_alloc(unsigned long tb_size)
{
#ifdef USE_STATIC_CODE_GEN_BUFFER
    code_gen_buffer = static_code_gen_buffer;
    code_gen_buffer_size = DEFAULT_CODE_GEN_BUFFER_SIZE;
    map_exec(code_gen_buffer, code_gen_buffer_size);
#else
    code_gen_buffer_size = tb_size;
    if (code_gen_buffer_size == 0) {
#if defined(CONFIG_USER_ONLY)
        /* in user mode, phys_ram_size is not meaningful */
        code_gen_buffer_size = DEFAULT_CODE_GEN_BUFFER_SIZE;
#else
        /* XXX: needs adjustments */
        code_gen_buffer_size = (unsigned long)(ram_size / 4);
#endif
    }
    if (code_gen_buffer_size < MIN_CODE_GEN_BUFFER_SIZE)
        code_gen_buffer_size = MIN_CODE_GEN_BUFFER_SIZE;
    /* The code gen buffer location may have constraints depending on
       the host cpu and OS */
#if defined(__linux__)
    {
        int flags;
        void *start = NULL;

        flags = MAP_PRIVATE | MAP_ANONYMOUS;
#if defined(__x86_64__)
        flags |= MAP_32BIT;
        /* Cannot map more than that */
        if (code_gen_buffer_size > (800 * 1024 * 1024))
            code_gen_buffer_size = (800 * 1024 * 1024);
#elif defined(__sparc_v9__)
        /* Map the buffer below 2G, so we can use direct calls and branches */
        flags |= MAP_FIXED;
        start = (void *) 0x60000000UL;
        if (code_gen_buffer_size > (512 * 1024 * 1024))
            code_gen_buffer_size = (512 * 1024 * 1024);
#elif defined(__arm__)
        /* Map the buffer below 32M, so we can use direct calls and branches */
        flags |= MAP_FIXED;
        start = (void *) 0x01000000UL;
        if (code_gen_buffer_size > 16 * 1024 * 1024)
            code_gen_buffer_size = 16 * 1024 * 1024;
#elif defined(__s390x__)
        /* Map the buffer so that we can use direct calls and branches.  */
        /* We have a +- 4GB range on the branches; leave some slop.  */
        if (code_gen_buffer_size > (3ul * 1024 * 1024 * 1024)) {
            code_gen_buffer_size = 3ul * 1024 * 1024 * 1024;
        }
        start = (void *)0x90000000UL;
#endif
        code_gen_buffer = mmap(start, code_gen_buffer_size,
                               PROT_WRITE | PROT_READ | PROT_EXEC,
                               flags, -1, 0);
        if (code_gen_buffer == MAP_FAILED) {
            fprintf(stderr, "Could not allocate dynamic translator buffer\n");
            exit(1);
        }
    }
#elif defined(__FreeBSD__) || defined(__FreeBSD_kernel__) \
    || defined(__DragonFly__) || defined(__OpenBSD__)
    {
        int flags;
        void *addr = NULL;
        flags = MAP_PRIVATE | MAP_ANONYMOUS;
#if defined(__x86_64__)
        /* FreeBSD doesn't have MAP_32BIT, use MAP_FIXED and assume
         * 0x40000000 is free */
        flags |= MAP_FIXED;
        addr = (void *)0x40000000;
        /* Cannot map more than that */
        if (code_gen_buffer_size > (800 * 1024 * 1024))
            code_gen_buffer_size = (800 * 1024 * 1024);
#elif defined(__sparc_v9__)
        /* Map the buffer below 2G, so we can use direct calls and branches */
        flags |= MAP_FIXED;
        addr = (void *) 0x60000000UL;
        if (code_gen_buffer_size > (512 * 1024 * 1024)) {
            code_gen_buffer_size = (512 * 1024 * 1024);
        }
#endif
        code_gen_buffer = mmap(addr, code_gen_buffer_size,
                               PROT_WRITE | PROT_READ | PROT_EXEC,
                               flags, -1, 0);
        if (code_gen_buffer == MAP_FAILED) {
            fprintf(stderr, "Could not allocate dynamic translator buffer\n");
            exit(1);
        }
    }
#else
    code_gen_buffer = qemu_malloc(code_gen_buffer_size);
    map_exec(code_gen_buffer, code_gen_buffer_size);
#endif
#endif /* !USE_STATIC_CODE_GEN_BUFFER */
    map_exec(code_gen_prologue, sizeof(code_gen_prologue));
    code_gen_buffer_max_size = code_gen_buffer_size -
        (TCG_MAX_OP_SIZE * OPC_MAX_SIZE);
    code_gen_max_blocks = code_gen_buffer_size / CODE_GEN_AVG_BLOCK_SIZE;
    tbs = qemu_malloc(code_gen_max_blocks * sizeof(TranslationBlock));
}

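/* Sizing note (illustrative, not original code): code_gen_buffer_max_size
   deliberately stops TCG_MAX_OP_SIZE * OPC_MAX_SIZE bytes short of the end
   of the buffer, which is roughly the worst-case code output of one more
   translation block accepted by tb_alloc(). code_gen_max_blocks divides the
   buffer by an average block-size estimate (CODE_GEN_AVG_BLOCK_SIZE), which
   bounds the size of the preallocated tbs[] array. */
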
/* Must be called before using the QEMU cpus. 'tb_size' is the size
   (in bytes) allocated to the translation buffer. Zero means default
   size. */
void cpu_exec_init_all(unsigned long tb_size)
{
    cpu_gen_init();
    code_gen_alloc(tb_size);
    code_gen_ptr = code_gen_buffer;
    page_init();
#if !defined(CONFIG_USER_ONLY)
    io_mem_init();
#endif
#if !defined(CONFIG_USER_ONLY) || !defined(CONFIG_USE_GUEST_BASE)
    /* There's no guest base to take into account, so go ahead and
       initialize the prologue now.  */
    tcg_prologue_init(&tcg_ctx);
#endif
}

#if defined(CPU_SAVE_VERSION) && !defined(CONFIG_USER_ONLY)

static int cpu_common_post_load(void *opaque, int version_id)
{
    CPUState *env = opaque;

    /* 0x01 was CPU_INTERRUPT_EXIT. This line can be removed when the
       version_id is increased. */
    env->interrupt_request &= ~0x01;
    tlb_flush(env, 1);

    return 0;
}

static const VMStateDescription vmstate_cpu_common = {
    .name = "cpu_common",
    .version_id = 1,
    .minimum_version_id = 1,
    .minimum_version_id_old = 1,
    .post_load = cpu_common_post_load,
    .fields      = (VMStateField []) {
        VMSTATE_UINT32(halted, CPUState),
        VMSTATE_UINT32(interrupt_request, CPUState),
        VMSTATE_END_OF_LIST()
    }
};
#endif

CPUState *qemu_get_cpu(int cpu)
{
    CPUState *env = first_cpu;

    while (env) {
        if (env->cpu_index == cpu)
            break;
        env = env->next_cpu;
    }

    return env;
}

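/* Usage sketch (illustrative, not original code): linear lookup of a CPU by
   index on the singly linked first_cpu list; returns NULL when no CPU has
   that index:

       CPUState *env = qemu_get_cpu(0);
       if (env) {
           ... operate on the first CPU ...
       }
*/
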
void cpu_exec_init(CPUState *env)
{
    CPUState **penv;
    int cpu_index;

#if defined(CONFIG_USER_ONLY)
    cpu_list_lock();
#endif
    env->next_cpu = NULL;
    penv = &first_cpu;
    cpu_index = 0;
    while (*penv != NULL) {
        penv = &(*penv)->next_cpu;
        cpu_index++;
    }
    env->cpu_index = cpu_index;
    env->numa_node = 0;
    QTAILQ_INIT(&env->breakpoints);
    QTAILQ_INIT(&env->watchpoints);
#ifndef CONFIG_USER_ONLY
    env->thread_id = qemu_get_thread_id();
#endif
    *penv = env;
#if defined(CONFIG_USER_ONLY)
    cpu_list_unlock();
#endif
#if defined(CPU_SAVE_VERSION) && !defined(CONFIG_USER_ONLY)
    vmstate_register(NULL, cpu_index, &vmstate_cpu_common, env);
    register_savevm(NULL, "cpu", cpu_index, CPU_SAVE_VERSION,
                    cpu_save, cpu_load, env);
#endif
}

/* Allocate a new translation block. The caller must flush the translation
   buffer if there are too many translation blocks or too much generated
   code. */
static TranslationBlock *tb_alloc(target_ulong pc)
{
    TranslationBlock *tb;

    if (nb_tbs >= code_gen_max_blocks ||
        (code_gen_ptr - code_gen_buffer) >= code_gen_buffer_max_size)
        return NULL;
    tb = &tbs[nb_tbs++];
    tb->pc = pc;
    tb->cflags = 0;
    return tb;
}

void tb_free(TranslationBlock *tb)
{
    /* In practice this is mostly used for single-use temporary TBs.
       Ignore the hard cases and just back up if this TB happens to
       be the last one generated.  */
    if (nb_tbs > 0 && tb == &tbs[nb_tbs - 1]) {
        code_gen_ptr = tb->tc_ptr;
        nb_tbs--;
    }
}

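/* Reclamation note (illustrative, not original code): freeing is LIFO.
   Only the most recently generated TB can actually be reclaimed; if tb1 is
   allocated and then tb2, tb_free(tb1) is a no-op while tb_free(tb2) rewinds
   both nb_tbs and code_gen_ptr. Callers that generate a single-use temporary
   TB can therefore free it immediately without fragmenting the buffer. */
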
static inline void invalidate_page_bitmap(PageDesc *p)
{
    if (p->code_bitmap) {
        qemu_free(p->code_bitmap);
        p->code_bitmap = NULL;
    }
    p->code_write_count = 0;
}

/* Set to NULL all the 'first_tb' fields in all PageDescs. */

static void page_flush_tb_1 (int level, void **lp)
{
    int i;

    if (*lp == NULL) {
        return;
    }
    if (level == 0) {
        PageDesc *pd = *lp;
        for (i = 0; i < L2_SIZE; ++i) {
            pd[i].first_tb = NULL;
            invalidate_page_bitmap(pd + i);
        }
    } else {
        void **pp = *lp;
        for (i = 0; i < L2_SIZE; ++i) {
            page_flush_tb_1 (level - 1, pp + i);
        }
    }
}

static void page_flush_tb(void)
{
    int i;
    for (i = 0; i < V_L1_SIZE; i++) {
        page_flush_tb_1(V_L1_SHIFT / L2_BITS - 1, l1_map + i);
    }
}

/* flush all the translation blocks */
/* XXX: tb_flush is currently not thread safe */
void tb_flush(CPUState *env1)
{
    CPUState *env;
#if defined(DEBUG_FLUSH)
    printf("qemu: flush code_size=%ld nb_tbs=%d avg_tb_size=%ld\n",
           (unsigned long)(code_gen_ptr - code_gen_buffer),
           nb_tbs, nb_tbs > 0 ?
           ((unsigned long)(code_gen_ptr - code_gen_buffer)) / nb_tbs : 0);
#endif
    if ((unsigned long)(code_gen_ptr - code_gen_buffer) > code_gen_buffer_size)
        cpu_abort(env1, "Internal error: code buffer overflow\n");

    nb_tbs = 0;

    for(env = first_cpu; env != NULL; env = env->next_cpu) {
        memset (env->tb_jmp_cache, 0, TB_JMP_CACHE_SIZE * sizeof (void *));
    }

    memset (tb_phys_hash, 0, CODE_GEN_PHYS_HASH_SIZE * sizeof (void *));
    page_flush_tb();

    code_gen_ptr = code_gen_buffer;
    /* XXX: flush processor icache at this point if cache flush is
       expensive */
    tb_flush_count++;
}

#ifdef DEBUG_TB_CHECK

static void tb_invalidate_check(target_ulong address)
{
    TranslationBlock *tb;
    int i;
    address &= TARGET_PAGE_MASK;
    for(i = 0;i < CODE_GEN_PHYS_HASH_SIZE; i++) {
        for(tb = tb_phys_hash[i]; tb != NULL; tb = tb->phys_hash_next) {
            if (!(address + TARGET_PAGE_SIZE <= tb->pc ||
                  address >= tb->pc + tb->size)) {
                printf("ERROR invalidate: address=" TARGET_FMT_lx
                       " PC=%08lx size=%04x\n",
                       address, (long)tb->pc, tb->size);
            }
        }
    }
}

/* verify that all the pages have correct rights for code */
static void tb_page_check(void)
{
    TranslationBlock *tb;
    int i, flags1, flags2;

    for(i = 0;i < CODE_GEN_PHYS_HASH_SIZE; i++) {
        for(tb = tb_phys_hash[i]; tb != NULL; tb = tb->phys_hash_next) {
            flags1 = page_get_flags(tb->pc);
            flags2 = page_get_flags(tb->pc + tb->size - 1);
            if ((flags1 & PAGE_WRITE) || (flags2 & PAGE_WRITE)) {
                printf("ERROR page flags: PC=%08lx size=%04x f1=%x f2=%x\n",
                       (long)tb->pc, tb->size, flags1, flags2);
            }
        }
    }
}

#endif

/* invalidate one TB */
static inline void tb_remove(TranslationBlock **ptb, TranslationBlock *tb,
                             int next_offset)
{
    TranslationBlock *tb1;
    for(;;) {
        tb1 = *ptb;
        if (tb1 == tb) {
            *ptb = *(TranslationBlock **)((char *)tb1 + next_offset);
            break;
        }
        ptb = (TranslationBlock **)((char *)tb1 + next_offset);
    }
}

static inline void tb_page_remove(TranslationBlock **ptb, TranslationBlock *tb)
{
    TranslationBlock *tb1;
    unsigned int n1;

    for(;;) {
        tb1 = *ptb;
        n1 = (long)tb1 & 3;
        tb1 = (TranslationBlock *)((long)tb1 & ~3);
        if (tb1 == tb) {
            *ptb = tb1->page_next[n1];
            break;
        }
        ptb = &tb1->page_next[n1];
    }
}

static inline void tb_jmp_remove(TranslationBlock *tb, int n)
{
    TranslationBlock *tb1, **ptb;
    unsigned int n1;

    ptb = &tb->jmp_next[n];
    tb1 = *ptb;
    if (tb1) {
        /* find tb(n) in circular list */
        for(;;) {
            tb1 = *ptb;
            n1 = (long)tb1 & 3;
            tb1 = (TranslationBlock *)((long)tb1 & ~3);
            if (n1 == n && tb1 == tb)
                break;
            if (n1 == 2) {
                ptb = &tb1->jmp_first;
            } else {
                ptb = &tb1->jmp_next[n1];
            }
        }
        /* now we can suppress tb(n) from the list */
        *ptb = tb->jmp_next[n];

        tb->jmp_next[n] = NULL;
    }
}

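/* Pointer-tagging note (illustrative, not original code): entries on the
   page_next[] and jmp_next[] lists carry extra state in the two low bits of
   the TranslationBlock pointer. For the jump lists, a tag of 0 or 1 names
   which jump slot of the pointed-to TB links back here, and the tag value 2
   marks the list head stored in jmp_first (set in tb_phys_invalidate() below
   as (TranslationBlock *)((long)tb | 2)), which is why the loops above mask
   with ~3 before dereferencing. */
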
/* reset the jump entry 'n' of a TB so that it is not chained to
   another TB */
static inline void tb_reset_jump(TranslationBlock *tb, int n)
{
    tb_set_jmp_target(tb, n, (unsigned long)(tb->tc_ptr + tb->tb_next_offset[n]));
}

void tb_phys_invalidate(TranslationBlock *tb, tb_page_addr_t page_addr)
{
    CPUState *env;
    PageDesc *p;
    unsigned int h, n1;
    tb_page_addr_t phys_pc;
    TranslationBlock *tb1, *tb2;

    /* remove the TB from the hash list */
    phys_pc = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
    h = tb_phys_hash_func(phys_pc);
    tb_remove(&tb_phys_hash[h], tb,
              offsetof(TranslationBlock, phys_hash_next));

    /* remove the TB from the page list */
    if (tb->page_addr[0] != page_addr) {
        p = page_find(tb->page_addr[0] >> TARGET_PAGE_BITS);
        tb_page_remove(&p->first_tb, tb);
        invalidate_page_bitmap(p);
    }
    if (tb->page_addr[1] != -1 && tb->page_addr[1] != page_addr) {
        p = page_find(tb->page_addr[1] >> TARGET_PAGE_BITS);
        tb_page_remove(&p->first_tb, tb);
        invalidate_page_bitmap(p);
    }

    tb_invalidated_flag = 1;

    /* remove the TB from each CPU's jump cache */
    h = tb_jmp_cache_hash_func(tb->pc);
    for(env = first_cpu; env != NULL; env = env->next_cpu) {
        if (env->tb_jmp_cache[h] == tb)
            env->tb_jmp_cache[h] = NULL;
    }

    /* suppress this TB from the two jump lists */
    tb_jmp_remove(tb, 0);
    tb_jmp_remove(tb, 1);

    /* suppress any remaining jumps to this TB */
    tb1 = tb->jmp_first;
    for(;;) {
        n1 = (long)tb1 & 3;
        if (n1 == 2)
            break;
        tb1 = (TranslationBlock *)((long)tb1 & ~3);
        tb2 = tb1->jmp_next[n1];
        tb_reset_jump(tb1, n1);
        tb1->jmp_next[n1] = NULL;
        tb1 = tb2;
    }
    tb->jmp_first = (TranslationBlock *)((long)tb | 2); /* fail safe */

    tb_phys_invalidate_count++;
}

static inline void set_bits(uint8_t *tab, int start, int len)
{
    int end, mask, end1;

    end = start + len;
    tab += start >> 3;
    mask = 0xff << (start & 7);
    if ((start & ~7) == (end & ~7)) {
        if (start < end) {
            mask &= ~(0xff << (end & 7));
            *tab |= mask;
        }
    } else {
        *tab++ |= mask;
        start = (start + 8) & ~7;
        end1 = end & ~7;
        while (start < end1) {
            *tab++ = 0xff;
            start += 8;
        }
        if (start < end) {
            mask = ~(0xff << (end & 7));
            *tab |= mask;
        }
    }
}

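/* Worked example (illustrative, not original code): set_bits(tab, 3, 7)
   marks bits 3..9. The range straddles a byte boundary, so the head byte
   gets tab[0] |= 0xf8 (bits 3..7), the full-byte loop does nothing, and the
   tail gets tab[1] |= 0x03 (bits 8..9). */
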
static void build_page_bitmap(PageDesc *p)
{
    int n, tb_start, tb_end;
    TranslationBlock *tb;

    p->code_bitmap = qemu_mallocz(TARGET_PAGE_SIZE / 8);

    tb = p->first_tb;
    while (tb != NULL) {
        n = (long)tb & 3;
        tb = (TranslationBlock *)((long)tb & ~3);
        /* NOTE: this is subtle as a TB may span two physical pages */
        if (n == 0) {
            /* NOTE: tb_end may be after the end of the page, but
               it is not a problem */
            tb_start = tb->pc & ~TARGET_PAGE_MASK;
            tb_end = tb_start + tb->size;
            if (tb_end > TARGET_PAGE_SIZE)
                tb_end = TARGET_PAGE_SIZE;
        } else {
            tb_start = 0;
            tb_end = ((tb->pc + tb->size) & ~TARGET_PAGE_MASK);
        }
        set_bits(p->code_bitmap, tb_start, tb_end - tb_start);
        tb = tb->page_next[n];
    }
}

TranslationBlock *tb_gen_code(CPUState *env,
                              target_ulong pc, target_ulong cs_base,
                              int flags, int cflags)
{
    TranslationBlock *tb;
    uint8_t *tc_ptr;
    tb_page_addr_t phys_pc, phys_page2;
    target_ulong virt_page2;
    int code_gen_size;

    phys_pc = get_page_addr_code(env, pc);
    tb = tb_alloc(pc);
    if (!tb) {
        /* flush must be done */
        tb_flush(env);
        /* cannot fail at this point */
        tb = tb_alloc(pc);
        /* Don't forget to invalidate previous TB info.  */
        tb_invalidated_flag = 1;
    }
    tc_ptr = code_gen_ptr;
    tb->tc_ptr = tc_ptr;
    tb->cs_base = cs_base;
    tb->flags = flags;
    tb->cflags = cflags;
    cpu_gen_code(env, tb, &code_gen_size);
    code_gen_ptr = (void *)(((unsigned long)code_gen_ptr + code_gen_size + CODE_GEN_ALIGN - 1) & ~(CODE_GEN_ALIGN - 1));

    /* check next page if needed */
    virt_page2 = (pc + tb->size - 1) & TARGET_PAGE_MASK;
    phys_page2 = -1;
    if ((pc & TARGET_PAGE_MASK) != virt_page2) {
        phys_page2 = get_page_addr_code(env, virt_page2);
    }
    tb_link_page(tb, phys_pc, phys_page2);
    return tb;
}

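/* Cross-page example (illustrative, not original code): with 4KB pages, a
   TB translated from pc == 0x4ff8 whose guest code is 0x10 bytes long ends
   at 0x5007, so virt_page2 == 0x5000 differs from pc's page 0x4000 and the
   second physical page is looked up as well; tb_link_page() then chains the
   TB on both pages so that a write to either one invalidates it. */
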
10089fa3e853Sbellard /* invalidate all TBs which intersect with the target physical page
10099fa3e853Sbellard    starting in range [start;end[. NOTE: start and end must refer to
1010d720b93dSbellard    the same physical page. 'is_cpu_write_access' should be true if called
1011d720b93dSbellard    from a real cpu write access: the virtual CPU will exit the current
1012d720b93dSbellard    TB if code is modified inside this TB. */
101341c1b1c9SPaul Brook void tb_invalidate_phys_page_range(tb_page_addr_t start, tb_page_addr_t end,
1014d720b93dSbellard                                    int is_cpu_write_access)
10159fa3e853Sbellard {
10166b917547Saliguori     TranslationBlock *tb, *tb_next, *saved_tb;
1017d720b93dSbellard     CPUState *env = cpu_single_env;
101841c1b1c9SPaul Brook     tb_page_addr_t tb_start, tb_end;
10196b917547Saliguori     PageDesc *p;
10206b917547Saliguori     int n;
10216b917547Saliguori #ifdef TARGET_HAS_PRECISE_SMC
10226b917547Saliguori     int current_tb_not_found = is_cpu_write_access;
10236b917547Saliguori     TranslationBlock *current_tb = NULL;
10246b917547Saliguori     int current_tb_modified = 0;
10256b917547Saliguori     target_ulong current_pc = 0;
10266b917547Saliguori     target_ulong current_cs_base = 0;
10276b917547Saliguori     int current_flags = 0;
10286b917547Saliguori #endif /* TARGET_HAS_PRECISE_SMC */
10299fa3e853Sbellard 
10309fa3e853Sbellard     p = page_find(start >> TARGET_PAGE_BITS);
10319fa3e853Sbellard     if (!p)
10329fa3e853Sbellard         return;
10339fa3e853Sbellard     if (!p->code_bitmap &&
1034d720b93dSbellard         ++p->code_write_count >= SMC_BITMAP_USE_THRESHOLD &&
1035d720b93dSbellard         is_cpu_write_access) {
10369fa3e853Sbellard         /* build code bitmap */
10379fa3e853Sbellard         build_page_bitmap(p);
10389fa3e853Sbellard     }
10399fa3e853Sbellard 
10409fa3e853Sbellard     /* we remove all the TBs in the range [start, end[ */
10419fa3e853Sbellard     /* XXX: see if in some cases it could be faster to invalidate all the code */
10429fa3e853Sbellard     tb = p->first_tb;
10439fa3e853Sbellard     while (tb != NULL) {
10449fa3e853Sbellard         n = (long)tb & 3;
10459fa3e853Sbellard         tb = (TranslationBlock *)((long)tb & ~3);
10469fa3e853Sbellard         tb_next = tb->page_next[n];
10479fa3e853Sbellard         /* NOTE: this is subtle as a TB may span two physical pages */
10489fa3e853Sbellard         if (n == 0) {
10499fa3e853Sbellard             /* NOTE: tb_end may be after the end of the page, but
10509fa3e853Sbellard                it is not a problem */
10519fa3e853Sbellard             tb_start = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
10529fa3e853Sbellard             tb_end = tb_start + tb->size;
10539fa3e853Sbellard         } else {
10549fa3e853Sbellard             tb_start = tb->page_addr[1];
10559fa3e853Sbellard             tb_end = tb_start + ((tb->pc + tb->size) & ~TARGET_PAGE_MASK);
10569fa3e853Sbellard         }
10579fa3e853Sbellard         if (!(tb_end <= start || tb_start >= end)) {
1058d720b93dSbellard #ifdef TARGET_HAS_PRECISE_SMC
1059d720b93dSbellard             if (current_tb_not_found) {
1060d720b93dSbellard                 current_tb_not_found = 0;
1061d720b93dSbellard                 current_tb = NULL;
10622e70f6efSpbrook                 if (env->mem_io_pc) {
1063d720b93dSbellard                     /* now we have a real cpu fault */
10642e70f6efSpbrook                     current_tb = tb_find_pc(env->mem_io_pc);
1065d720b93dSbellard                 }
1066d720b93dSbellard             }
1067d720b93dSbellard             if (current_tb == tb &&
10682e70f6efSpbrook                 (current_tb->cflags & CF_COUNT_MASK) != 1) {
1069d720b93dSbellard                 /* If we are modifying the current TB, we must stop
1070d720b93dSbellard                 its execution. We could be more precise by checking
1071d720b93dSbellard                 that the modification is after the current PC, but it
1072d720b93dSbellard                 would require a specialized function to partially
1073d720b93dSbellard                 restore the CPU state */
1074d720b93dSbellard 
1075d720b93dSbellard                 current_tb_modified = 1;
1076618ba8e6SStefan Weil                 cpu_restore_state(current_tb, env, env->mem_io_pc);
10776b917547Saliguori                 cpu_get_tb_cpu_state(env, &current_pc, &current_cs_base,
10786b917547Saliguori                                      &current_flags);
1079d720b93dSbellard             }
1080d720b93dSbellard #endif /* TARGET_HAS_PRECISE_SMC */
10816f5a9f7eSbellard             /* we need to do that to handle the case where a signal
10826f5a9f7eSbellard                occurs while doing tb_phys_invalidate() */
10836f5a9f7eSbellard             saved_tb = NULL;
10846f5a9f7eSbellard             if (env) {
1085ea1c1802Sbellard                 saved_tb = env->current_tb;
1086ea1c1802Sbellard                 env->current_tb = NULL;
10876f5a9f7eSbellard             }
10889fa3e853Sbellard             tb_phys_invalidate(tb, -1);
10896f5a9f7eSbellard             if (env) {
1090ea1c1802Sbellard                 env->current_tb = saved_tb;
1091ea1c1802Sbellard                 if (env->interrupt_request && env->current_tb)
1092ea1c1802Sbellard                     cpu_interrupt(env, env->interrupt_request);
10939fa3e853Sbellard             }
10946f5a9f7eSbellard         }
10959fa3e853Sbellard         tb = tb_next;
10969fa3e853Sbellard     }
10979fa3e853Sbellard #if !defined(CONFIG_USER_ONLY)
10989fa3e853Sbellard     /* if no code remaining, no need to continue to use slow writes */
10999fa3e853Sbellard     if (!p->first_tb) {
11009fa3e853Sbellard         invalidate_page_bitmap(p);
1101d720b93dSbellard         if (is_cpu_write_access) {
11022e70f6efSpbrook             tlb_unprotect_code_phys(env, start, env->mem_io_vaddr);
1103d720b93dSbellard         }
1104d720b93dSbellard     }
1105d720b93dSbellard #endif
1106d720b93dSbellard #ifdef TARGET_HAS_PRECISE_SMC
1107d720b93dSbellard     if (current_tb_modified) {
1108d720b93dSbellard         /* we generate a block containing just the instruction that
1109d720b93dSbellard            modified the memory; this ensures the block cannot modify
1110d720b93dSbellard            itself */
1111ea1c1802Sbellard         env->current_tb = NULL;
11122e70f6efSpbrook         tb_gen_code(env, current_pc, current_cs_base, current_flags, 1);
1113d720b93dSbellard         cpu_resume_from_signal(env, NULL);
11149fa3e853Sbellard     }
11159fa3e853Sbellard #endif
11169fa3e853Sbellard }
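
/* Note on the encoding used by p->first_tb and page_next above: a TB
   can occupy up to two physical pages, and each pointer on a page's TB
   list carries the page index (0 or 1) in its two low bits, which are
   free because TranslationBlock pointers are at least 4-byte aligned.
   A minimal decoding sketch:

       n  = (long)tb & 3;                         -- which of the TB's pages
       tb = (TranslationBlock *)((long)tb & ~3);  -- the actual pointer
       tb = tb->page_next[n];                     -- next TB on this page
*/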
11179fa3e853Sbellard 
11189fa3e853Sbellard /* len must be <= 8 and start must be a multiple of len */
111941c1b1c9SPaul Brook static inline void tb_invalidate_phys_page_fast(tb_page_addr_t start, int len)
11209fa3e853Sbellard {
11219fa3e853Sbellard     PageDesc *p;
11229fa3e853Sbellard     int offset, b;
112359817ccbSbellard #if 0
1124a4193c8aSbellard     if (1) {
112593fcfe39Saliguori         qemu_log("modifying code at 0x%x size=%d EIP=%x PC=%08x\n",
11262e70f6efSpbrook                   cpu_single_env->mem_io_vaddr, len,
1127a4193c8aSbellard                   cpu_single_env->eip,
1128a4193c8aSbellard                   cpu_single_env->eip + (long)cpu_single_env->segs[R_CS].base);
1129a4193c8aSbellard     }
113059817ccbSbellard #endif
11319fa3e853Sbellard     p = page_find(start >> TARGET_PAGE_BITS);
11329fa3e853Sbellard     if (!p)
11339fa3e853Sbellard         return;
11349fa3e853Sbellard     if (p->code_bitmap) {
11359fa3e853Sbellard         offset = start & ~TARGET_PAGE_MASK;
11369fa3e853Sbellard         b = p->code_bitmap[offset >> 3] >> (offset & 7);
11379fa3e853Sbellard         if (b & ((1 << len) - 1))
11389fa3e853Sbellard             goto do_invalidate;
11399fa3e853Sbellard     } else {
11409fa3e853Sbellard     do_invalidate:
1141d720b93dSbellard         tb_invalidate_phys_page_range(start, start + len, 1);
11429fa3e853Sbellard     }
11439fa3e853Sbellard }
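
/* Worked example of the bitmap test above: for a len=4 write at page
   offset 0x123, offset >> 3 selects byte 0x24 of code_bitmap and
   offset & 7 == 3 shifts that byte so bit 0 lines up with the first
   written byte; masking with (1 << 4) - 1 == 0xf then tells whether
   any of the four written bytes overlaps translated code, in which
   case the slow tb_invalidate_phys_page_range() path runs. */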
11449fa3e853Sbellard 
11459fa3e853Sbellard #if !defined(CONFIG_SOFTMMU)
114641c1b1c9SPaul Brook static void tb_invalidate_phys_page(tb_page_addr_t addr,
1147d720b93dSbellard                                     unsigned long pc, void *puc)
11489fa3e853Sbellard {
11496b917547Saliguori     TranslationBlock *tb;
11509fa3e853Sbellard     PageDesc *p;
11516b917547Saliguori     int n;
1152d720b93dSbellard #ifdef TARGET_HAS_PRECISE_SMC
11536b917547Saliguori     TranslationBlock *current_tb = NULL;
1154d720b93dSbellard     CPUState *env = cpu_single_env;
11556b917547Saliguori     int current_tb_modified = 0;
11566b917547Saliguori     target_ulong current_pc = 0;
11576b917547Saliguori     target_ulong current_cs_base = 0;
11586b917547Saliguori     int current_flags = 0;
1159d720b93dSbellard #endif
11609fa3e853Sbellard 
11619fa3e853Sbellard     addr &= TARGET_PAGE_MASK;
11629fa3e853Sbellard     p = page_find(addr >> TARGET_PAGE_BITS);
1163fd6ce8f6Sbellard     if (!p)
1164fd6ce8f6Sbellard         return;
1165fd6ce8f6Sbellard     tb = p->first_tb;
1166d720b93dSbellard #ifdef TARGET_HAS_PRECISE_SMC
1167d720b93dSbellard     if (tb && pc != 0) {
1168d720b93dSbellard         current_tb = tb_find_pc(pc);
1169d720b93dSbellard     }
1170d720b93dSbellard #endif
1171fd6ce8f6Sbellard     while (tb != NULL) {
11729fa3e853Sbellard         n = (long)tb & 3;
11739fa3e853Sbellard         tb = (TranslationBlock *)((long)tb & ~3);
1174d720b93dSbellard #ifdef TARGET_HAS_PRECISE_SMC
1175d720b93dSbellard         if (current_tb == tb &&
11762e70f6efSpbrook             (current_tb->cflags & CF_COUNT_MASK) != 1) {
1177d720b93dSbellard                 /* If we are modifying the current TB, we must stop
1178d720b93dSbellard                    its execution. We could be more precise by checking
1179d720b93dSbellard                    that the modification is after the current PC, but it
1180d720b93dSbellard                    would require a specialized function to partially
1181d720b93dSbellard                    restore the CPU state */
1182d720b93dSbellard 
1183d720b93dSbellard             current_tb_modified = 1;
1184618ba8e6SStefan Weil             cpu_restore_state(current_tb, env, pc);
11856b917547Saliguori             cpu_get_tb_cpu_state(env, &current_pc, &current_cs_base,
11866b917547Saliguori                                  &current_flags);
1187d720b93dSbellard         }
1188d720b93dSbellard #endif /* TARGET_HAS_PRECISE_SMC */
11899fa3e853Sbellard         tb_phys_invalidate(tb, addr);
11909fa3e853Sbellard         tb = tb->page_next[n];
1191fd6ce8f6Sbellard     }
1192fd6ce8f6Sbellard     p->first_tb = NULL;
1193d720b93dSbellard #ifdef TARGET_HAS_PRECISE_SMC
1194d720b93dSbellard     if (current_tb_modified) {
1195d720b93dSbellard     /* we generate a block containing just the instruction that
1196d720b93dSbellard        modified the memory; this ensures the block cannot modify
1197d720b93dSbellard        itself */
1198ea1c1802Sbellard         env->current_tb = NULL;
11992e70f6efSpbrook         tb_gen_code(env, current_pc, current_cs_base, current_flags, 1);
1200d720b93dSbellard         cpu_resume_from_signal(env, puc);
1201d720b93dSbellard     }
1202d720b93dSbellard #endif
1203fd6ce8f6Sbellard }
12049fa3e853Sbellard #endif
1205fd6ce8f6Sbellard 
1206fd6ce8f6Sbellard /* add the tb in the target page and protect it if necessary */
12079fa3e853Sbellard static inline void tb_alloc_page(TranslationBlock *tb,
120841c1b1c9SPaul Brook                                  unsigned int n, tb_page_addr_t page_addr)
1209fd6ce8f6Sbellard {
1210fd6ce8f6Sbellard     PageDesc *p;
12119fa3e853Sbellard     TranslationBlock *last_first_tb;
12129fa3e853Sbellard 
12139fa3e853Sbellard     tb->page_addr[n] = page_addr;
12145cd2c5b6SRichard Henderson     p = page_find_alloc(page_addr >> TARGET_PAGE_BITS, 1);
12159fa3e853Sbellard     tb->page_next[n] = p->first_tb;
12169fa3e853Sbellard     last_first_tb = p->first_tb;
12179fa3e853Sbellard     p->first_tb = (TranslationBlock *)((long)tb | n);
12189fa3e853Sbellard     invalidate_page_bitmap(p);
12199fa3e853Sbellard 
1220107db443Sbellard #if defined(TARGET_HAS_SMC) || 1
1221d720b93dSbellard 
12229fa3e853Sbellard #if defined(CONFIG_USER_ONLY)
12239fa3e853Sbellard     if (p->flags & PAGE_WRITE) {
122453a5960aSpbrook         target_ulong addr;
122553a5960aSpbrook         PageDesc *p2;
1226fd6ce8f6Sbellard         int prot;
1227fd6ce8f6Sbellard 
1228fd6ce8f6Sbellard         /* force the host page to be non-writable (writes will incur a
1229fd6ce8f6Sbellard            page fault + mprotect overhead) */
123053a5960aSpbrook         page_addr &= qemu_host_page_mask;
1231fd6ce8f6Sbellard         prot = 0;
123253a5960aSpbrook         for(addr = page_addr; addr < page_addr + qemu_host_page_size;
123353a5960aSpbrook             addr += TARGET_PAGE_SIZE) {
123453a5960aSpbrook 
123553a5960aSpbrook             p2 = page_find (addr >> TARGET_PAGE_BITS);
123653a5960aSpbrook             if (!p2)
123753a5960aSpbrook                 continue;
123853a5960aSpbrook             prot |= p2->flags;
123953a5960aSpbrook             p2->flags &= ~PAGE_WRITE;
124053a5960aSpbrook           }
124153a5960aSpbrook         mprotect(g2h(page_addr), qemu_host_page_size,
1242fd6ce8f6Sbellard                  (prot & PAGE_BITS) & ~PAGE_WRITE);
1243fd6ce8f6Sbellard #ifdef DEBUG_TB_INVALIDATE
1244ab3d1727Sblueswir1         printf("protecting code page: 0x" TARGET_FMT_lx "\n",
124553a5960aSpbrook                page_addr);
1246fd6ce8f6Sbellard #endif
1247fd6ce8f6Sbellard     }
12489fa3e853Sbellard #else
12499fa3e853Sbellard     /* if some code is already present, then the pages are already
12509fa3e853Sbellard        protected. So we handle the case where only the first TB is
12519fa3e853Sbellard        allocated in a physical page */
12529fa3e853Sbellard     if (!last_first_tb) {
12536a00d601Sbellard         tlb_protect_code(page_addr);
12549fa3e853Sbellard     }
12559fa3e853Sbellard #endif
1256d720b93dSbellard 
1257d720b93dSbellard #endif /* TARGET_HAS_SMC */
1258fd6ce8f6Sbellard }
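
/* In the CONFIG_USER_ONLY case above, protection is enforced by the
   host MMU: a guest store to the read-only page raises SIGSEGV, and
   the signal path re-enables PAGE_WRITE only after invalidating the
   TBs on that page (see page_unprotect()).  A condensed sketch of
   that recovery step, not the exact control flow:

       p->flags |= PAGE_WRITE;
       mprotect(g2h(host_start), qemu_host_page_size,
                prot & PAGE_BITS);
       tb_invalidate_phys_page(addr, pc, puc);
*/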
1259fd6ce8f6Sbellard 
12609fa3e853Sbellard /* add a new TB and link it to the physical page tables. phys_page2 is
12619fa3e853Sbellard    (-1) to indicate that only one page contains the TB. */
126241c1b1c9SPaul Brook void tb_link_page(TranslationBlock *tb,
126341c1b1c9SPaul Brook                   tb_page_addr_t phys_pc, tb_page_addr_t phys_page2)
1264d4e8164fSbellard {
12659fa3e853Sbellard     unsigned int h;
12669fa3e853Sbellard     TranslationBlock **ptb;
12679fa3e853Sbellard 
1268c8a706feSpbrook     /* Grab the mmap lock to stop another thread invalidating this TB
1269c8a706feSpbrook        before we are done.  */
1270c8a706feSpbrook     mmap_lock();
12719fa3e853Sbellard     /* add in the physical hash table */
12729fa3e853Sbellard     h = tb_phys_hash_func(phys_pc);
12739fa3e853Sbellard     ptb = &tb_phys_hash[h];
12749fa3e853Sbellard     tb->phys_hash_next = *ptb;
12759fa3e853Sbellard     *ptb = tb;
1276fd6ce8f6Sbellard 
1277fd6ce8f6Sbellard     /* add in the page list */
12789fa3e853Sbellard     tb_alloc_page(tb, 0, phys_pc & TARGET_PAGE_MASK);
12799fa3e853Sbellard     if (phys_page2 != -1)
12809fa3e853Sbellard         tb_alloc_page(tb, 1, phys_page2);
12819fa3e853Sbellard     else
12829fa3e853Sbellard         tb->page_addr[1] = -1;
12839fa3e853Sbellard 
1284d4e8164fSbellard     tb->jmp_first = (TranslationBlock *)((long)tb | 2);
1285d4e8164fSbellard     tb->jmp_next[0] = NULL;
1286d4e8164fSbellard     tb->jmp_next[1] = NULL;
1287d4e8164fSbellard 
1288d4e8164fSbellard     /* init original jump addresses */
1289d4e8164fSbellard     if (tb->tb_next_offset[0] != 0xffff)
1290d4e8164fSbellard         tb_reset_jump(tb, 0);
1291d4e8164fSbellard     if (tb->tb_next_offset[1] != 0xffff)
1292d4e8164fSbellard         tb_reset_jump(tb, 1);
12938a40a180Sbellard 
12948a40a180Sbellard #ifdef DEBUG_TB_CHECK
12958a40a180Sbellard     tb_page_check();
12968a40a180Sbellard #endif
1297c8a706feSpbrook     mmap_unlock();
1298fd6ce8f6Sbellard }
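
/* The second page argument comes from the translator: a TB crosses a
   page boundary when its last byte falls on a different page than its
   first.  Roughly as tb_gen_code() computes it:

       virt_page2 = (pc + tb->size - 1) & TARGET_PAGE_MASK;
       phys_page2 = -1;
       if ((pc & TARGET_PAGE_MASK) != virt_page2) {
           phys_page2 = get_page_addr_code(env, virt_page2);
       }
       tb_link_page(tb, phys_pc, phys_page2);
*/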
1299fd6ce8f6Sbellard 
1300a513fe19Sbellard /* find the TB 'tb' such that tb[0].tc_ptr <= tc_ptr <
1301a513fe19Sbellard    tb[1].tc_ptr. Return NULL if not found */
1302a513fe19Sbellard TranslationBlock *tb_find_pc(unsigned long tc_ptr)
1303a513fe19Sbellard {
1304a513fe19Sbellard     int m_min, m_max, m;
1305a513fe19Sbellard     unsigned long v;
1306a513fe19Sbellard     TranslationBlock *tb;
1307a513fe19Sbellard 
1308a513fe19Sbellard     if (nb_tbs <= 0)
1309a513fe19Sbellard         return NULL;
1310a513fe19Sbellard     if (tc_ptr < (unsigned long)code_gen_buffer ||
1311a513fe19Sbellard         tc_ptr >= (unsigned long)code_gen_ptr)
1312a513fe19Sbellard         return NULL;
1313a513fe19Sbellard     /* binary search (cf Knuth) */
1314a513fe19Sbellard     m_min = 0;
1315a513fe19Sbellard     m_max = nb_tbs - 1;
1316a513fe19Sbellard     while (m_min <= m_max) {
1317a513fe19Sbellard         m = (m_min + m_max) >> 1;
1318a513fe19Sbellard         tb = &tbs[m];
1319a513fe19Sbellard         v = (unsigned long)tb->tc_ptr;
1320a513fe19Sbellard         if (v == tc_ptr)
1321a513fe19Sbellard             return tb;
1322a513fe19Sbellard         else if (tc_ptr < v) {
1323a513fe19Sbellard             m_max = m - 1;
1324a513fe19Sbellard         } else {
1325a513fe19Sbellard             m_min = m + 1;
1326a513fe19Sbellard         }
1327a513fe19Sbellard     }
1328a513fe19Sbellard     return &tbs[m_max];
1329a513fe19Sbellard }
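
/* Usage sketch: because TBs are laid out contiguously in
   code_gen_buffer, a host PC taken from a signal frame or from
   env->mem_io_pc is enough to recover which TB was executing
   (host_pc here is a hypothetical variable):

       tb = tb_find_pc(host_pc);
       if (tb) {
           cpu_restore_state(tb, env, host_pc);
       }
*/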
13307501267eSbellard 
1331ea041c0eSbellard static void tb_reset_jump_recursive(TranslationBlock *tb);
1332ea041c0eSbellard 
1333ea041c0eSbellard static inline void tb_reset_jump_recursive2(TranslationBlock *tb, int n)
1334ea041c0eSbellard {
1335ea041c0eSbellard     TranslationBlock *tb1, *tb_next, **ptb;
1336ea041c0eSbellard     unsigned int n1;
1337ea041c0eSbellard 
1338ea041c0eSbellard     tb1 = tb->jmp_next[n];
1339ea041c0eSbellard     if (tb1 != NULL) {
1340ea041c0eSbellard         /* find head of list */
1341ea041c0eSbellard         for(;;) {
1342ea041c0eSbellard             n1 = (long)tb1 & 3;
1343ea041c0eSbellard             tb1 = (TranslationBlock *)((long)tb1 & ~3);
1344ea041c0eSbellard             if (n1 == 2)
1345ea041c0eSbellard                 break;
1346ea041c0eSbellard             tb1 = tb1->jmp_next[n1];
1347ea041c0eSbellard         }
1348ea041c0eSbellard         /* we are now sure that tb jumps to tb1 */
1349ea041c0eSbellard         tb_next = tb1;
1350ea041c0eSbellard 
1351ea041c0eSbellard         /* remove tb from the jmp_first list */
1352ea041c0eSbellard         ptb = &tb_next->jmp_first;
1353ea041c0eSbellard         for(;;) {
1354ea041c0eSbellard             tb1 = *ptb;
1355ea041c0eSbellard             n1 = (long)tb1 & 3;
1356ea041c0eSbellard             tb1 = (TranslationBlock *)((long)tb1 & ~3);
1357ea041c0eSbellard             if (n1 == n && tb1 == tb)
1358ea041c0eSbellard                 break;
1359ea041c0eSbellard             ptb = &tb1->jmp_next[n1];
1360ea041c0eSbellard         }
1361ea041c0eSbellard         *ptb = tb->jmp_next[n];
1362ea041c0eSbellard         tb->jmp_next[n] = NULL;
1363ea041c0eSbellard 
1364ea041c0eSbellard         /* suppress the jump to next tb in generated code */
1365ea041c0eSbellard         tb_reset_jump(tb, n);
1366ea041c0eSbellard 
13670124311eSbellard         /* suppress jumps in the tb we could have jumped to */
1368ea041c0eSbellard         tb_reset_jump_recursive(tb_next);
1369ea041c0eSbellard     }
1370ea041c0eSbellard }
1371ea041c0eSbellard 
1372ea041c0eSbellard static void tb_reset_jump_recursive(TranslationBlock *tb)
1373ea041c0eSbellard {
1374ea041c0eSbellard     tb_reset_jump_recursive2(tb, 0);
1375ea041c0eSbellard     tb_reset_jump_recursive2(tb, 1);
1376ea041c0eSbellard }
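
/* The jump lists walked above use the same low-bit tagging as the page
   lists: bits 0/1 of a jmp_next pointer say which outgoing jump of the
   pointed-to TB the entry represents, and the (tb | 2) value stored in
   jmp_first by tb_link_page() marks the head of the circular list,
   which is why the head-finding loop stops at n1 == 2. */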
1377ea041c0eSbellard 
13781fddef4bSbellard #if defined(TARGET_HAS_ICE)
137994df27fdSPaul Brook #if defined(CONFIG_USER_ONLY)
138094df27fdSPaul Brook static void breakpoint_invalidate(CPUState *env, target_ulong pc)
138194df27fdSPaul Brook {
138294df27fdSPaul Brook     tb_invalidate_phys_page_range(pc, pc + 1, 0);
138394df27fdSPaul Brook }
138494df27fdSPaul Brook #else
1385d720b93dSbellard static void breakpoint_invalidate(CPUState *env, target_ulong pc)
1386d720b93dSbellard {
1387c227f099SAnthony Liguori     target_phys_addr_t addr;
13889b3c35e0Sj_mayer     target_ulong pd;
1389c227f099SAnthony Liguori     ram_addr_t ram_addr;
1390c2f07f81Spbrook     PhysPageDesc *p;
1391d720b93dSbellard 
1392c2f07f81Spbrook     addr = cpu_get_phys_page_debug(env, pc);
1393c2f07f81Spbrook     p = phys_page_find(addr >> TARGET_PAGE_BITS);
1394c2f07f81Spbrook     if (!p) {
1395c2f07f81Spbrook         pd = IO_MEM_UNASSIGNED;
1396c2f07f81Spbrook     } else {
1397c2f07f81Spbrook         pd = p->phys_offset;
1398c2f07f81Spbrook     }
1399c2f07f81Spbrook     ram_addr = (pd & TARGET_PAGE_MASK) | (pc & ~TARGET_PAGE_MASK);
1400706cd4b5Spbrook     tb_invalidate_phys_page_range(ram_addr, ram_addr + 1, 0);
1401d720b93dSbellard }
1402c27004ecSbellard #endif
140394df27fdSPaul Brook #endif /* TARGET_HAS_ICE */
1404d720b93dSbellard 
1405c527ee8fSPaul Brook #if defined(CONFIG_USER_ONLY)
1406c527ee8fSPaul Brook void cpu_watchpoint_remove_all(CPUState *env, int mask)
1407c527ee8fSPaul Brook 
1408c527ee8fSPaul Brook {
1409c527ee8fSPaul Brook }
1410c527ee8fSPaul Brook 
1411c527ee8fSPaul Brook int cpu_watchpoint_insert(CPUState *env, target_ulong addr, target_ulong len,
1412c527ee8fSPaul Brook                           int flags, CPUWatchpoint **watchpoint)
1413c527ee8fSPaul Brook {
1414c527ee8fSPaul Brook     return -ENOSYS;
1415c527ee8fSPaul Brook }
1416c527ee8fSPaul Brook #else
14176658ffb8Spbrook /* Add a watchpoint.  */
1418a1d1bb31Saliguori int cpu_watchpoint_insert(CPUState *env, target_ulong addr, target_ulong len,
1419a1d1bb31Saliguori                           int flags, CPUWatchpoint **watchpoint)
14206658ffb8Spbrook {
1421b4051334Saliguori     target_ulong len_mask = ~(len - 1);
1422c0ce998eSaliguori     CPUWatchpoint *wp;
14236658ffb8Spbrook 
1424b4051334Saliguori     /* sanity checks: allow power-of-2 lengths, deny unaligned watchpoints */
1425b4051334Saliguori     if ((len != 1 && len != 2 && len != 4 && len != 8) || (addr & ~len_mask)) {
1426b4051334Saliguori         fprintf(stderr, "qemu: tried to set invalid watchpoint at "
1427b4051334Saliguori                 TARGET_FMT_lx ", len=" TARGET_FMT_lu "\n", addr, len);
1428b4051334Saliguori         return -EINVAL;
1429b4051334Saliguori     }
1430a1d1bb31Saliguori     wp = qemu_malloc(sizeof(*wp));
14316658ffb8Spbrook 
1432a1d1bb31Saliguori     wp->vaddr = addr;
1433b4051334Saliguori     wp->len_mask = len_mask;
1434a1d1bb31Saliguori     wp->flags = flags;
1435a1d1bb31Saliguori 
14362dc9f411Saliguori     /* keep all GDB-injected watchpoints in front */
1437c0ce998eSaliguori     if (flags & BP_GDB)
143872cf2d4fSBlue Swirl         QTAILQ_INSERT_HEAD(&env->watchpoints, wp, entry);
1439c0ce998eSaliguori     else
144072cf2d4fSBlue Swirl         QTAILQ_INSERT_TAIL(&env->watchpoints, wp, entry);
1441a1d1bb31Saliguori 
14426658ffb8Spbrook     tlb_flush_page(env, addr);
1443a1d1bb31Saliguori 
1444a1d1bb31Saliguori     if (watchpoint)
1445a1d1bb31Saliguori         *watchpoint = wp;
1446a1d1bb31Saliguori     return 0;
14476658ffb8Spbrook }
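
/* Usage sketch for the API above (hypothetical caller): install a
   4-byte write watchpoint and later drop it by reference.  len must be
   a power of two (1/2/4/8) and addr must be len-aligned:

       CPUWatchpoint *wp;
       if (cpu_watchpoint_insert(env, addr, 4, BP_MEM_WRITE, &wp) == 0) {
           ... trigger and handle the watchpoint ...
           cpu_watchpoint_remove_by_ref(env, wp);
       }
*/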
14486658ffb8Spbrook 
1449a1d1bb31Saliguori /* Remove a specific watchpoint.  */
1450a1d1bb31Saliguori int cpu_watchpoint_remove(CPUState *env, target_ulong addr, target_ulong len,
1451a1d1bb31Saliguori                           int flags)
14526658ffb8Spbrook {
1453b4051334Saliguori     target_ulong len_mask = ~(len - 1);
1454a1d1bb31Saliguori     CPUWatchpoint *wp;
14556658ffb8Spbrook 
145672cf2d4fSBlue Swirl     QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
1457b4051334Saliguori         if (addr == wp->vaddr && len_mask == wp->len_mask
14586e140f28Saliguori                 && flags == (wp->flags & ~BP_WATCHPOINT_HIT)) {
1459a1d1bb31Saliguori             cpu_watchpoint_remove_by_ref(env, wp);
14606658ffb8Spbrook             return 0;
14616658ffb8Spbrook         }
14626658ffb8Spbrook     }
1463a1d1bb31Saliguori     return -ENOENT;
14646658ffb8Spbrook }
14656658ffb8Spbrook 
1466a1d1bb31Saliguori /* Remove a specific watchpoint by reference.  */
1467a1d1bb31Saliguori void cpu_watchpoint_remove_by_ref(CPUState *env, CPUWatchpoint *watchpoint)
1468a1d1bb31Saliguori {
146972cf2d4fSBlue Swirl     QTAILQ_REMOVE(&env->watchpoints, watchpoint, entry);
14707d03f82fSedgar_igl 
1471a1d1bb31Saliguori     tlb_flush_page(env, watchpoint->vaddr);
1472a1d1bb31Saliguori 
1473a1d1bb31Saliguori     qemu_free(watchpoint);
14747d03f82fSedgar_igl }
14757d03f82fSedgar_igl 
1476a1d1bb31Saliguori /* Remove all matching watchpoints.  */
1477a1d1bb31Saliguori void cpu_watchpoint_remove_all(CPUState *env, int mask)
1478a1d1bb31Saliguori {
1479c0ce998eSaliguori     CPUWatchpoint *wp, *next;
1480a1d1bb31Saliguori 
148172cf2d4fSBlue Swirl     QTAILQ_FOREACH_SAFE(wp, &env->watchpoints, entry, next) {
1482a1d1bb31Saliguori         if (wp->flags & mask)
1483a1d1bb31Saliguori             cpu_watchpoint_remove_by_ref(env, wp);
1484a1d1bb31Saliguori     }
1485c0ce998eSaliguori }
1486c527ee8fSPaul Brook #endif
1487a1d1bb31Saliguori 
1488a1d1bb31Saliguori /* Add a breakpoint.  */
1489a1d1bb31Saliguori int cpu_breakpoint_insert(CPUState *env, target_ulong pc, int flags,
1490a1d1bb31Saliguori                           CPUBreakpoint **breakpoint)
14914c3a88a2Sbellard {
14921fddef4bSbellard #if defined(TARGET_HAS_ICE)
1493c0ce998eSaliguori     CPUBreakpoint *bp;
14944c3a88a2Sbellard 
1495a1d1bb31Saliguori     bp = qemu_malloc(sizeof(*bp));
14964c3a88a2Sbellard 
1497a1d1bb31Saliguori     bp->pc = pc;
1498a1d1bb31Saliguori     bp->flags = flags;
1499a1d1bb31Saliguori 
15002dc9f411Saliguori     /* keep all GDB-injected breakpoints in front */
1501c0ce998eSaliguori     if (flags & BP_GDB)
150272cf2d4fSBlue Swirl         QTAILQ_INSERT_HEAD(&env->breakpoints, bp, entry);
1503c0ce998eSaliguori     else
150472cf2d4fSBlue Swirl         QTAILQ_INSERT_TAIL(&env->breakpoints, bp, entry);
1505d720b93dSbellard 
1506d720b93dSbellard     breakpoint_invalidate(env, pc);
1507a1d1bb31Saliguori 
1508a1d1bb31Saliguori     if (breakpoint)
1509a1d1bb31Saliguori         *breakpoint = bp;
15104c3a88a2Sbellard     return 0;
15114c3a88a2Sbellard #else
1512a1d1bb31Saliguori     return -ENOSYS;
15134c3a88a2Sbellard #endif
15144c3a88a2Sbellard }
15154c3a88a2Sbellard 
1516a1d1bb31Saliguori /* Remove a specific breakpoint.  */
1517a1d1bb31Saliguori int cpu_breakpoint_remove(CPUState *env, target_ulong pc, int flags)
1518a1d1bb31Saliguori {
15197d03f82fSedgar_igl #if defined(TARGET_HAS_ICE)
1520a1d1bb31Saliguori     CPUBreakpoint *bp;
1521a1d1bb31Saliguori 
152272cf2d4fSBlue Swirl     QTAILQ_FOREACH(bp, &env->breakpoints, entry) {
1523a1d1bb31Saliguori         if (bp->pc == pc && bp->flags == flags) {
1524a1d1bb31Saliguori             cpu_breakpoint_remove_by_ref(env, bp);
1525a1d1bb31Saliguori             return 0;
15267d03f82fSedgar_igl         }
1527a1d1bb31Saliguori     }
1528a1d1bb31Saliguori     return -ENOENT;
1529a1d1bb31Saliguori #else
1530a1d1bb31Saliguori     return -ENOSYS;
15317d03f82fSedgar_igl #endif
15327d03f82fSedgar_igl }
15337d03f82fSedgar_igl 
1534a1d1bb31Saliguori /* Remove a specific breakpoint by reference.  */
1535a1d1bb31Saliguori void cpu_breakpoint_remove_by_ref(CPUState *env, CPUBreakpoint *breakpoint)
15364c3a88a2Sbellard {
15371fddef4bSbellard #if defined(TARGET_HAS_ICE)
153872cf2d4fSBlue Swirl     QTAILQ_REMOVE(&env->breakpoints, breakpoint, entry);
1539d720b93dSbellard 
1540a1d1bb31Saliguori     breakpoint_invalidate(env, breakpoint->pc);
1541a1d1bb31Saliguori 
1542a1d1bb31Saliguori     qemu_free(breakpoint);
1543a1d1bb31Saliguori #endif
1544a1d1bb31Saliguori }
1545a1d1bb31Saliguori 
1546a1d1bb31Saliguori /* Remove all matching breakpoints. */
1547a1d1bb31Saliguori void cpu_breakpoint_remove_all(CPUState *env, int mask)
1548a1d1bb31Saliguori {
1549a1d1bb31Saliguori #if defined(TARGET_HAS_ICE)
1550c0ce998eSaliguori     CPUBreakpoint *bp, *next;
1551a1d1bb31Saliguori 
155272cf2d4fSBlue Swirl     QTAILQ_FOREACH_SAFE(bp, &env->breakpoints, entry, next) {
1553a1d1bb31Saliguori         if (bp->flags & mask)
1554a1d1bb31Saliguori             cpu_breakpoint_remove_by_ref(env, bp);
1555c0ce998eSaliguori     }
15564c3a88a2Sbellard #endif
15574c3a88a2Sbellard }
15584c3a88a2Sbellard 
1559c33a346eSbellard /* enable or disable single-step mode. EXCP_DEBUG is returned by the
1560c33a346eSbellard    CPU loop after each instruction */
1561c33a346eSbellard void cpu_single_step(CPUState *env, int enabled)
1562c33a346eSbellard {
15631fddef4bSbellard #if defined(TARGET_HAS_ICE)
1564c33a346eSbellard     if (env->singlestep_enabled != enabled) {
1565c33a346eSbellard         env->singlestep_enabled = enabled;
1566e22a25c9Saliguori         if (kvm_enabled())
1567e22a25c9Saliguori             kvm_update_guest_debug(env, 0);
1568e22a25c9Saliguori         else {
1569ccbb4d44SStuart Brady             /* must flush all the translated code to avoid inconsistencies */
15709fa3e853Sbellard             /* XXX: only flush what is necessary */
15710124311eSbellard             tb_flush(env);
1572c33a346eSbellard         }
1573e22a25c9Saliguori     }
1574c33a346eSbellard #endif
1575c33a346eSbellard }
1576c33a346eSbellard 
157734865134Sbellard /* enable or disable low-level logging */
157834865134Sbellard void cpu_set_log(int log_flags)
157934865134Sbellard {
158034865134Sbellard     loglevel = log_flags;
158134865134Sbellard     if (loglevel && !logfile) {
158211fcfab4Spbrook         logfile = fopen(logfilename, log_append ? "a" : "w");
158334865134Sbellard         if (!logfile) {
158434865134Sbellard             perror(logfilename);
158534865134Sbellard             _exit(1);
158634865134Sbellard         }
15879fa3e853Sbellard #if !defined(CONFIG_SOFTMMU)
15889fa3e853Sbellard         /* must avoid glibc's use of mmap() by setting a buffer "by hand" */
15899fa3e853Sbellard         {
1590b55266b5Sblueswir1             static char logfile_buf[4096];
15919fa3e853Sbellard             setvbuf(logfile, logfile_buf, _IOLBF, sizeof(logfile_buf));
15929fa3e853Sbellard         }
1593bf65f53fSFilip Navara #elif !defined(_WIN32)
1594bf65f53fSFilip Navara         /* Win32 doesn't support line-buffering and requires size >= 2 */
159534865134Sbellard         setvbuf(logfile, NULL, _IOLBF, 0);
15969fa3e853Sbellard #endif
1597e735b91cSpbrook         log_append = 1;
1598e735b91cSpbrook     }
1599e735b91cSpbrook     if (!loglevel && logfile) {
1600e735b91cSpbrook         fclose(logfile);
1601e735b91cSpbrook         logfile = NULL;
160234865134Sbellard     }
160334865134Sbellard }
160434865134Sbellard 
160534865134Sbellard void cpu_set_log_filename(const char *filename)
160634865134Sbellard {
160734865134Sbellard     logfilename = strdup(filename);
1608e735b91cSpbrook     if (logfile) {
1609e735b91cSpbrook         fclose(logfile);
1610e735b91cSpbrook         logfile = NULL;
1611e735b91cSpbrook     }
1612e735b91cSpbrook     cpu_set_log(loglevel);
161334865134Sbellard }
1614c33a346eSbellard 
16153098dba0Saurel32 static void cpu_unlink_tb(CPUState *env)
1616ea041c0eSbellard {
1617d5975363Spbrook     /* FIXME: TB unchaining isn't SMP safe.  For now just ignore the
1618d5975363Spbrook        problem and hope the cpu will stop of its own accord.  For userspace
1619d5975363Spbrook        emulation this often isn't actually as bad as it sounds.  Often
1620d5975363Spbrook        signals are used primarily to interrupt blocking syscalls.  */
16213098dba0Saurel32     TranslationBlock *tb;
1622c227f099SAnthony Liguori     static spinlock_t interrupt_lock = SPIN_LOCK_UNLOCKED;
16233098dba0Saurel32 
1624cab1b4bdSRiku Voipio     spin_lock(&interrupt_lock);
16253098dba0Saurel32     tb = env->current_tb;
16263098dba0Saurel32     /* if the cpu is currently executing code, we must unlink it and
16273098dba0Saurel32        all the potentially executing TB */
1628f76cfe56SRiku Voipio     if (tb) {
16293098dba0Saurel32         env->current_tb = NULL;
16303098dba0Saurel32         tb_reset_jump_recursive(tb);
16313098dba0Saurel32     }
1632cab1b4bdSRiku Voipio     spin_unlock(&interrupt_lock);
16333098dba0Saurel32 }
16343098dba0Saurel32 
163597ffbd8dSJan Kiszka #ifndef CONFIG_USER_ONLY
16363098dba0Saurel32 /* mask must never be zero, except for the A20 change call */
1637ec6959d0SJan Kiszka static void tcg_handle_interrupt(CPUState *env, int mask)
16383098dba0Saurel32 {
16393098dba0Saurel32     int old_mask;
16403098dba0Saurel32 
16413098dba0Saurel32     old_mask = env->interrupt_request;
16423098dba0Saurel32     env->interrupt_request |= mask;
16433098dba0Saurel32 
16448edac960Saliguori     /*
16458edac960Saliguori      * If called from iothread context, wake the target cpu in
16468edac960Saliguori      * case it's halted.
16478edac960Saliguori      */
1648b7680cb6SJan Kiszka     if (!qemu_cpu_is_self(env)) {
16498edac960Saliguori         qemu_cpu_kick(env);
16508edac960Saliguori         return;
16518edac960Saliguori     }
16528edac960Saliguori 
16532e70f6efSpbrook     if (use_icount) {
1654266910c4Spbrook         env->icount_decr.u16.high = 0xffff;
16552e70f6efSpbrook         if (!can_do_io(env)
1656be214e6cSaurel32             && (mask & ~old_mask) != 0) {
16572e70f6efSpbrook             cpu_abort(env, "Raised interrupt while not in I/O function");
16582e70f6efSpbrook         }
16592e70f6efSpbrook     } else {
16603098dba0Saurel32         cpu_unlink_tb(env);
1661ea041c0eSbellard     }
16622e70f6efSpbrook }
1663ea041c0eSbellard 
1664ec6959d0SJan Kiszka CPUInterruptHandler cpu_interrupt_handler = tcg_handle_interrupt;
1665ec6959d0SJan Kiszka 
166697ffbd8dSJan Kiszka #else /* CONFIG_USER_ONLY */
166797ffbd8dSJan Kiszka 
166897ffbd8dSJan Kiszka void cpu_interrupt(CPUState *env, int mask)
166997ffbd8dSJan Kiszka {
167097ffbd8dSJan Kiszka     env->interrupt_request |= mask;
167197ffbd8dSJan Kiszka     cpu_unlink_tb(env);
167297ffbd8dSJan Kiszka }
167397ffbd8dSJan Kiszka #endif /* CONFIG_USER_ONLY */
167497ffbd8dSJan Kiszka 
1675b54ad049Sbellard void cpu_reset_interrupt(CPUState *env, int mask)
1676b54ad049Sbellard {
1677b54ad049Sbellard     env->interrupt_request &= ~mask;
1678b54ad049Sbellard }
1679b54ad049Sbellard 
16803098dba0Saurel32 void cpu_exit(CPUState *env)
16813098dba0Saurel32 {
16823098dba0Saurel32     env->exit_request = 1;
16833098dba0Saurel32     cpu_unlink_tb(env);
16843098dba0Saurel32 }
16853098dba0Saurel32 
1686c7cd6a37Sblueswir1 const CPULogItem cpu_log_items[] = {
1687f193c797Sbellard     { CPU_LOG_TB_OUT_ASM, "out_asm",
1688f193c797Sbellard       "show generated host assembly code for each compiled TB" },
1689f193c797Sbellard     { CPU_LOG_TB_IN_ASM, "in_asm",
1690f193c797Sbellard       "show target assembly code for each compiled TB" },
1691f193c797Sbellard     { CPU_LOG_TB_OP, "op",
169257fec1feSbellard       "show micro ops for each compiled TB" },
1693f193c797Sbellard     { CPU_LOG_TB_OP_OPT, "op_opt",
1694e01a1157Sblueswir1       "show micro ops "
1695e01a1157Sblueswir1 #ifdef TARGET_I386
1696e01a1157Sblueswir1       "before eflags optimization and "
1697f193c797Sbellard #endif
1698e01a1157Sblueswir1       "after liveness analysis" },
1699f193c797Sbellard     { CPU_LOG_INT, "int",
1700f193c797Sbellard       "show interrupts/exceptions in short format" },
1701f193c797Sbellard     { CPU_LOG_EXEC, "exec",
1702f193c797Sbellard       "show trace before each executed TB (lots of logs)" },
17039fddaa0cSbellard     { CPU_LOG_TB_CPU, "cpu",
1704e91c8a77Sths       "show CPU state before block translation" },
1705f193c797Sbellard #ifdef TARGET_I386
1706f193c797Sbellard     { CPU_LOG_PCALL, "pcall",
1707f193c797Sbellard       "show protected mode far calls/returns/exceptions" },
1708eca1bdf4Saliguori     { CPU_LOG_RESET, "cpu_reset",
1709eca1bdf4Saliguori       "show CPU state before CPU resets" },
1710f193c797Sbellard #endif
17118e3a9fd2Sbellard #ifdef DEBUG_IOPORT
1712fd872598Sbellard     { CPU_LOG_IOPORT, "ioport",
1713fd872598Sbellard       "show all i/o ports accesses" },
17148e3a9fd2Sbellard #endif
1715f193c797Sbellard     { 0, NULL, NULL },
1716f193c797Sbellard };
1717f193c797Sbellard 
1718f6f3fbcaSMichael S. Tsirkin #ifndef CONFIG_USER_ONLY
1719f6f3fbcaSMichael S. Tsirkin static QLIST_HEAD(memory_client_list, CPUPhysMemoryClient) memory_client_list
1720f6f3fbcaSMichael S. Tsirkin     = QLIST_HEAD_INITIALIZER(memory_client_list);
1721f6f3fbcaSMichael S. Tsirkin 
1722f6f3fbcaSMichael S. Tsirkin static void cpu_notify_set_memory(target_phys_addr_t start_addr,
1723f6f3fbcaSMichael S. Tsirkin                                   ram_addr_t size,
17240fd542fbSMichael S. Tsirkin                                   ram_addr_t phys_offset,
17250fd542fbSMichael S. Tsirkin                                   bool log_dirty)
1726f6f3fbcaSMichael S. Tsirkin {
1727f6f3fbcaSMichael S. Tsirkin     CPUPhysMemoryClient *client;
1728f6f3fbcaSMichael S. Tsirkin     QLIST_FOREACH(client, &memory_client_list, list) {
17290fd542fbSMichael S. Tsirkin         client->set_memory(client, start_addr, size, phys_offset, log_dirty);
1730f6f3fbcaSMichael S. Tsirkin     }
1731f6f3fbcaSMichael S. Tsirkin }
1732f6f3fbcaSMichael S. Tsirkin 
1733f6f3fbcaSMichael S. Tsirkin static int cpu_notify_sync_dirty_bitmap(target_phys_addr_t start,
1734f6f3fbcaSMichael S. Tsirkin                                         target_phys_addr_t end)
1735f6f3fbcaSMichael S. Tsirkin {
1736f6f3fbcaSMichael S. Tsirkin     CPUPhysMemoryClient *client;
1737f6f3fbcaSMichael S. Tsirkin     QLIST_FOREACH(client, &memory_client_list, list) {
1738f6f3fbcaSMichael S. Tsirkin         int r = client->sync_dirty_bitmap(client, start, end);
1739f6f3fbcaSMichael S. Tsirkin         if (r < 0)
1740f6f3fbcaSMichael S. Tsirkin             return r;
1741f6f3fbcaSMichael S. Tsirkin     }
1742f6f3fbcaSMichael S. Tsirkin     return 0;
1743f6f3fbcaSMichael S. Tsirkin }
1744f6f3fbcaSMichael S. Tsirkin 
1745f6f3fbcaSMichael S. Tsirkin static int cpu_notify_migration_log(int enable)
1746f6f3fbcaSMichael S. Tsirkin {
1747f6f3fbcaSMichael S. Tsirkin     CPUPhysMemoryClient *client;
1748f6f3fbcaSMichael S. Tsirkin     QLIST_FOREACH(client, &memory_client_list, list) {
1749f6f3fbcaSMichael S. Tsirkin         int r = client->migration_log(client, enable);
1750f6f3fbcaSMichael S. Tsirkin         if (r < 0)
1751f6f3fbcaSMichael S. Tsirkin             return r;
1752f6f3fbcaSMichael S. Tsirkin     }
1753f6f3fbcaSMichael S. Tsirkin     return 0;
1754f6f3fbcaSMichael S. Tsirkin }
1755f6f3fbcaSMichael S. Tsirkin 
17568d4c78e7SAlex Williamson /* The l1_phys_map provides the upper P_L1_BITs of the guest physical
17578d4c78e7SAlex Williamson  * address.  Each intermediate table provides the next L2_BITs of guest
17588d4c78e7SAlex Williamson  * physical address space.  The number of levels varies based on host and
17598d4c78e7SAlex Williamson  * guest configuration, making it efficient to build the final guest
17608d4c78e7SAlex Williamson  * physical address by seeding the L1 offset and shifting and adding in
17618d4c78e7SAlex Williamson  * each L2 offset as we recurse through them. */
17625cd2c5b6SRichard Henderson static void phys_page_for_each_1(CPUPhysMemoryClient *client,
17638d4c78e7SAlex Williamson                                  int level, void **lp, target_phys_addr_t addr)
1764f6f3fbcaSMichael S. Tsirkin {
17655cd2c5b6SRichard Henderson     int i;
1766f6f3fbcaSMichael S. Tsirkin 
17675cd2c5b6SRichard Henderson     if (*lp == NULL) {
17685cd2c5b6SRichard Henderson         return;
1769f6f3fbcaSMichael S. Tsirkin     }
17705cd2c5b6SRichard Henderson     if (level == 0) {
17715cd2c5b6SRichard Henderson         PhysPageDesc *pd = *lp;
17728d4c78e7SAlex Williamson         addr <<= L2_BITS + TARGET_PAGE_BITS;
17737296abacSPaul Brook         for (i = 0; i < L2_SIZE; ++i) {
17745cd2c5b6SRichard Henderson             if (pd[i].phys_offset != IO_MEM_UNASSIGNED) {
17758d4c78e7SAlex Williamson                 client->set_memory(client, addr | i << TARGET_PAGE_BITS,
17760fd542fbSMichael S. Tsirkin                                    TARGET_PAGE_SIZE, pd[i].phys_offset, false);
1777f6f3fbcaSMichael S. Tsirkin             }
17785cd2c5b6SRichard Henderson         }
17795cd2c5b6SRichard Henderson     } else {
17805cd2c5b6SRichard Henderson         void **pp = *lp;
17817296abacSPaul Brook         for (i = 0; i < L2_SIZE; ++i) {
17828d4c78e7SAlex Williamson             phys_page_for_each_1(client, level - 1, pp + i,
17838d4c78e7SAlex Williamson                                  (addr << L2_BITS) | i);
1784f6f3fbcaSMichael S. Tsirkin         }
1785f6f3fbcaSMichael S. Tsirkin     }
1786f6f3fbcaSMichael S. Tsirkin }
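
/* Worked example of the reconstruction above, assuming a configuration
   with L2_BITS == 10 and TARGET_PAGE_BITS == 12: a leaf entry i reached
   through L1 slot a and one intermediate L2 slot b describes guest
   physical address ((a << 10 | b) << 22) | (i << 12), i.e. the indices
   accumulate left to right and the final shift makes room for both the
   last level's 10 index bits and the 12-bit page offset. */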
1787f6f3fbcaSMichael S. Tsirkin 
1788f6f3fbcaSMichael S. Tsirkin static void phys_page_for_each(CPUPhysMemoryClient *client)
1789f6f3fbcaSMichael S. Tsirkin {
17905cd2c5b6SRichard Henderson     int i;
17915cd2c5b6SRichard Henderson     for (i = 0; i < P_L1_SIZE; ++i) {
17925cd2c5b6SRichard Henderson         phys_page_for_each_1(client, P_L1_SHIFT / L2_BITS - 1,
17938d4c78e7SAlex Williamson                              l1_phys_map + i, i);
1794f6f3fbcaSMichael S. Tsirkin     }
1795f6f3fbcaSMichael S. Tsirkin }
1796f6f3fbcaSMichael S. Tsirkin 
1797f6f3fbcaSMichael S. Tsirkin void cpu_register_phys_memory_client(CPUPhysMemoryClient *client)
1798f6f3fbcaSMichael S. Tsirkin {
1799f6f3fbcaSMichael S. Tsirkin     QLIST_INSERT_HEAD(&memory_client_list, client, list);
1800f6f3fbcaSMichael S. Tsirkin     phys_page_for_each(client);
1801f6f3fbcaSMichael S. Tsirkin }
1802f6f3fbcaSMichael S. Tsirkin 
1803f6f3fbcaSMichael S. Tsirkin void cpu_unregister_phys_memory_client(CPUPhysMemoryClient *client)
1804f6f3fbcaSMichael S. Tsirkin {
1805f6f3fbcaSMichael S. Tsirkin     QLIST_REMOVE(client, list);
1806f6f3fbcaSMichael S. Tsirkin }
1807f6f3fbcaSMichael S. Tsirkin #endif
1808f6f3fbcaSMichael S. Tsirkin 
1809f193c797Sbellard static int cmp1(const char *s1, int n, const char *s2)
1810f193c797Sbellard {
1811f193c797Sbellard     if (strlen(s2) != n)
1812f193c797Sbellard         return 0;
1813f193c797Sbellard     return memcmp(s1, s2, n) == 0;
1814f193c797Sbellard }
1815f193c797Sbellard 
1816f193c797Sbellard /* takes a comma-separated list of log masks. Returns 0 on error. */
1817f193c797Sbellard int cpu_str_to_log_mask(const char *str)
1818f193c797Sbellard {
1819c7cd6a37Sblueswir1     const CPULogItem *item;
1820f193c797Sbellard     int mask;
1821f193c797Sbellard     const char *p, *p1;
1822f193c797Sbellard 
1823f193c797Sbellard     p = str;
1824f193c797Sbellard     mask = 0;
1825f193c797Sbellard     for(;;) {
1826f193c797Sbellard         p1 = strchr(p, ',');
1827f193c797Sbellard         if (!p1)
1828f193c797Sbellard             p1 = p + strlen(p);
18298e3a9fd2Sbellard         if(cmp1(p,p1-p,"all")) {
18308e3a9fd2Sbellard             for(item = cpu_log_items; item->mask != 0; item++) {
18318e3a9fd2Sbellard                 mask |= item->mask;
18328e3a9fd2Sbellard             }
18338e3a9fd2Sbellard         } else {
1834f193c797Sbellard             for(item = cpu_log_items; item->mask != 0; item++) {
1835f193c797Sbellard                 if (cmp1(p, p1 - p, item->name))
1836f193c797Sbellard                     goto found;
1837f193c797Sbellard             }
1838f193c797Sbellard             return 0;
18398e3a9fd2Sbellard         }
1840f193c797Sbellard     found:
1841f193c797Sbellard         mask |= item->mask;
1842f193c797Sbellard         if (*p1 != ',')
1843f193c797Sbellard             break;
1844f193c797Sbellard         p = p1 + 1;
1845f193c797Sbellard     }
1846f193c797Sbellard     return mask;
1847f193c797Sbellard }
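
/* Example: cpu_str_to_log_mask("in_asm,exec") returns
   CPU_LOG_TB_IN_ASM | CPU_LOG_EXEC, "all" ORs in every entry of
   cpu_log_items, and any unrecognized name makes the whole call
   return 0. */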
1848ea041c0eSbellard 
18497501267eSbellard void cpu_abort(CPUState *env, const char *fmt, ...)
18507501267eSbellard {
18517501267eSbellard     va_list ap;
1852493ae1f0Spbrook     va_list ap2;
18537501267eSbellard 
18547501267eSbellard     va_start(ap, fmt);
1855493ae1f0Spbrook     va_copy(ap2, ap);
18567501267eSbellard     fprintf(stderr, "qemu: fatal: ");
18577501267eSbellard     vfprintf(stderr, fmt, ap);
18587501267eSbellard     fprintf(stderr, "\n");
18597501267eSbellard #ifdef TARGET_I386
18607fe48483Sbellard     cpu_dump_state(env, stderr, fprintf, X86_DUMP_FPU | X86_DUMP_CCOP);
18617fe48483Sbellard #else
18627fe48483Sbellard     cpu_dump_state(env, stderr, fprintf, 0);
18637501267eSbellard #endif
186493fcfe39Saliguori     if (qemu_log_enabled()) {
186593fcfe39Saliguori         qemu_log("qemu: fatal: ");
186693fcfe39Saliguori         qemu_log_vprintf(fmt, ap2);
186793fcfe39Saliguori         qemu_log("\n");
1868f9373291Sj_mayer #ifdef TARGET_I386
186993fcfe39Saliguori         log_cpu_state(env, X86_DUMP_FPU | X86_DUMP_CCOP);
1870f9373291Sj_mayer #else
187193fcfe39Saliguori         log_cpu_state(env, 0);
1872f9373291Sj_mayer #endif
187331b1a7b4Saliguori         qemu_log_flush();
187493fcfe39Saliguori         qemu_log_close();
1875924edcaeSbalrog     }
1876493ae1f0Spbrook     va_end(ap2);
1877f9373291Sj_mayer     va_end(ap);
1878fd052bf6SRiku Voipio #if defined(CONFIG_USER_ONLY)
1879fd052bf6SRiku Voipio     {
1880fd052bf6SRiku Voipio         struct sigaction act;
1881fd052bf6SRiku Voipio         sigfillset(&act.sa_mask);
1882fd052bf6SRiku Voipio         act.sa_handler = SIG_DFL;
1883fd052bf6SRiku Voipio         sigaction(SIGABRT, &act, NULL);
1884fd052bf6SRiku Voipio     }
1885fd052bf6SRiku Voipio #endif
18867501267eSbellard     abort();
18877501267eSbellard }
18887501267eSbellard 
1889c5be9f08Sths CPUState *cpu_copy(CPUState *env)
1890c5be9f08Sths {
189101ba9816Sths     CPUState *new_env = cpu_init(env->cpu_model_str);
1892c5be9f08Sths     CPUState *next_cpu = new_env->next_cpu;
1893c5be9f08Sths     int cpu_index = new_env->cpu_index;
18945a38f081Saliguori #if defined(TARGET_HAS_ICE)
18955a38f081Saliguori     CPUBreakpoint *bp;
18965a38f081Saliguori     CPUWatchpoint *wp;
18975a38f081Saliguori #endif
18985a38f081Saliguori 
1899c5be9f08Sths     memcpy(new_env, env, sizeof(CPUState));
19005a38f081Saliguori 
19015a38f081Saliguori     /* Preserve chaining and index. */
1902c5be9f08Sths     new_env->next_cpu = next_cpu;
1903c5be9f08Sths     new_env->cpu_index = cpu_index;
19045a38f081Saliguori 
19055a38f081Saliguori     /* Clone all break/watchpoints.
19065a38f081Saliguori        Note: Once we support ptrace with hw-debug register access, make sure
19075a38f081Saliguori        BP_CPU break/watchpoints are handled correctly on clone. */
190872cf2d4fSBlue Swirl     QTAILQ_INIT(&env->breakpoints);
190972cf2d4fSBlue Swirl     QTAILQ_INIT(&env->watchpoints);
19105a38f081Saliguori #if defined(TARGET_HAS_ICE)
191172cf2d4fSBlue Swirl     QTAILQ_FOREACH(bp, &env->breakpoints, entry) {
19125a38f081Saliguori         cpu_breakpoint_insert(new_env, bp->pc, bp->flags, NULL);
19135a38f081Saliguori     }
191472cf2d4fSBlue Swirl     QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
19155a38f081Saliguori         cpu_watchpoint_insert(new_env, wp->vaddr, (~wp->len_mask) + 1,
19165a38f081Saliguori                               wp->flags, NULL);
19175a38f081Saliguori     }
19185a38f081Saliguori #endif
19195a38f081Saliguori 
1920c5be9f08Sths     return new_env;
1921c5be9f08Sths }
1922c5be9f08Sths 
19230124311eSbellard #if !defined(CONFIG_USER_ONLY)
19240124311eSbellard 
19255c751e99Sedgar_igl static inline void tlb_flush_jmp_cache(CPUState *env, target_ulong addr)
19265c751e99Sedgar_igl {
19275c751e99Sedgar_igl     unsigned int i;
19285c751e99Sedgar_igl 
19295c751e99Sedgar_igl     /* Discard jump cache entries for any tb which might overlap the
19305c751e99Sedgar_igl        flushed page.  */
19315c751e99Sedgar_igl     i = tb_jmp_cache_hash_page(addr - TARGET_PAGE_SIZE);
19325c751e99Sedgar_igl     memset (&env->tb_jmp_cache[i], 0,
19335c751e99Sedgar_igl             TB_JMP_PAGE_SIZE * sizeof(TranslationBlock *));
19345c751e99Sedgar_igl 
19355c751e99Sedgar_igl     i = tb_jmp_cache_hash_page(addr);
19365c751e99Sedgar_igl     memset (&env->tb_jmp_cache[i], 0,
19375c751e99Sedgar_igl             TB_JMP_PAGE_SIZE * sizeof(TranslationBlock *));
19385c751e99Sedgar_igl }
19395c751e99Sedgar_igl 
194008738984SIgor Kovalenko static CPUTLBEntry s_cputlb_empty_entry = {
194108738984SIgor Kovalenko     .addr_read  = -1,
194208738984SIgor Kovalenko     .addr_write = -1,
194308738984SIgor Kovalenko     .addr_code  = -1,
194408738984SIgor Kovalenko     .addend     = -1,
194508738984SIgor Kovalenko };
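
/* -1 in every field guarantees a miss: lookups compare page-aligned
   addresses against addr_read/addr_write/addr_code masked with
   (TARGET_PAGE_MASK | TLB_INVALID_MASK), and an all-ones value keeps
   TLB_INVALID_MASK set, so no valid address can ever match. */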
194608738984SIgor Kovalenko 
1947ee8b7021Sbellard /* NOTE: if flush_global is true, also flush global entries (not
1948ee8b7021Sbellard    implemented yet) */
1949ee8b7021Sbellard void tlb_flush(CPUState *env, int flush_global)
195033417e70Sbellard {
195133417e70Sbellard     int i;
19520124311eSbellard 
19539fa3e853Sbellard #if defined(DEBUG_TLB)
19549fa3e853Sbellard     printf("tlb_flush:\n");
19559fa3e853Sbellard #endif
19560124311eSbellard     /* must reset current TB so that interrupts cannot modify the
19570124311eSbellard        links while we are modifying them */
19580124311eSbellard     env->current_tb = NULL;
19590124311eSbellard 
196033417e70Sbellard     for(i = 0; i < CPU_TLB_SIZE; i++) {
1961cfde4bd9SIsaku Yamahata         int mmu_idx;
1962cfde4bd9SIsaku Yamahata         for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) {
196308738984SIgor Kovalenko             env->tlb_table[mmu_idx][i] = s_cputlb_empty_entry;
1964cfde4bd9SIsaku Yamahata         }
196533417e70Sbellard     }
19669fa3e853Sbellard 
19678a40a180Sbellard     memset (env->tb_jmp_cache, 0, TB_JMP_CACHE_SIZE * sizeof (void *));
19689fa3e853Sbellard 
1969d4c430a8SPaul Brook     env->tlb_flush_addr = -1;
1970d4c430a8SPaul Brook     env->tlb_flush_mask = 0;
1971e3db7226Sbellard     tlb_flush_count++;
197233417e70Sbellard }
197333417e70Sbellard 
1974274da6b2Sbellard static inline void tlb_flush_entry(CPUTLBEntry *tlb_entry, target_ulong addr)
197561382a50Sbellard {
197684b7b8e7Sbellard     if (addr == (tlb_entry->addr_read &
197784b7b8e7Sbellard                  (TARGET_PAGE_MASK | TLB_INVALID_MASK)) ||
197884b7b8e7Sbellard         addr == (tlb_entry->addr_write &
197984b7b8e7Sbellard                  (TARGET_PAGE_MASK | TLB_INVALID_MASK)) ||
198084b7b8e7Sbellard         addr == (tlb_entry->addr_code &
198184b7b8e7Sbellard                  (TARGET_PAGE_MASK | TLB_INVALID_MASK))) {
198208738984SIgor Kovalenko         *tlb_entry = s_cputlb_empty_entry;
198384b7b8e7Sbellard     }
198461382a50Sbellard }
198561382a50Sbellard 
19862e12669aSbellard void tlb_flush_page(CPUState *env, target_ulong addr)
198733417e70Sbellard {
19888a40a180Sbellard     int i;
1989cfde4bd9SIsaku Yamahata     int mmu_idx;
19900124311eSbellard 
19919fa3e853Sbellard #if defined(DEBUG_TLB)
1992108c49b8Sbellard     printf("tlb_flush_page: " TARGET_FMT_lx "\n", addr);
19939fa3e853Sbellard #endif
1994d4c430a8SPaul Brook     /* Check if we need to flush due to large pages.  */
1995d4c430a8SPaul Brook     if ((addr & env->tlb_flush_mask) == env->tlb_flush_addr) {
1996d4c430a8SPaul Brook #if defined(DEBUG_TLB)
1997d4c430a8SPaul Brook         printf("tlb_flush_page: forced full flush ("
1998d4c430a8SPaul Brook                TARGET_FMT_lx "/" TARGET_FMT_lx ")\n",
1999d4c430a8SPaul Brook                env->tlb_flush_addr, env->tlb_flush_mask);
2000d4c430a8SPaul Brook #endif
2001d4c430a8SPaul Brook         tlb_flush(env, 1);
2002d4c430a8SPaul Brook         return;
2003d4c430a8SPaul Brook     }
20040124311eSbellard     /* must reset current TB so that interrupts cannot modify the
20050124311eSbellard        links while we are modifying them */
20060124311eSbellard     env->current_tb = NULL;
200733417e70Sbellard 
200861382a50Sbellard     addr &= TARGET_PAGE_MASK;
200933417e70Sbellard     i = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
2010cfde4bd9SIsaku Yamahata     for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++)
2011cfde4bd9SIsaku Yamahata         tlb_flush_entry(&env->tlb_table[mmu_idx][i], addr);
20120124311eSbellard 
20135c751e99Sedgar_igl     tlb_flush_jmp_cache(env, addr);
20149fa3e853Sbellard }
20159fa3e853Sbellard 
20169fa3e853Sbellard /* update the TLBs so that writes to code in the virtual page 'addr'
20179fa3e853Sbellard    can be detected */
2018c227f099SAnthony Liguori static void tlb_protect_code(ram_addr_t ram_addr)
201961382a50Sbellard {
20206a00d601Sbellard     cpu_physical_memory_reset_dirty(ram_addr,
20216a00d601Sbellard                                     ram_addr + TARGET_PAGE_SIZE,
20226a00d601Sbellard                                     CODE_DIRTY_FLAG);
20239fa3e853Sbellard }
20249fa3e853Sbellard 
20259fa3e853Sbellard /* update the TLB so that writes in physical page 'ram_addr' are no longer
20263a7d929eSbellard    tested for self-modifying code */
2027c227f099SAnthony Liguori static void tlb_unprotect_code_phys(CPUState *env, ram_addr_t ram_addr,
20283a7d929eSbellard                                     target_ulong vaddr)
20299fa3e853Sbellard {
2030f7c11b53SYoshiaki Tamura     cpu_physical_memory_set_dirty_flags(ram_addr, CODE_DIRTY_FLAG);
20319fa3e853Sbellard }
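
/* The two helpers above implement code protection with the dirty bits:
   clearing CODE_DIRTY_FLAG makes stores to the page take the slow
   notdirty write path, which calls tb_invalidate_phys_page_fast()
   before setting the flag again; setting CODE_DIRTY_FLAG hands the
   page back to fast writes once no translated code remains on it. */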
20329fa3e853Sbellard 
20331ccde1cbSbellard static inline void tlb_reset_dirty_range(CPUTLBEntry *tlb_entry,
20341ccde1cbSbellard                                          unsigned long start, unsigned long length)
20351ccde1cbSbellard {
20361ccde1cbSbellard     unsigned long addr;
203784b7b8e7Sbellard     if ((tlb_entry->addr_write & ~TARGET_PAGE_MASK) == IO_MEM_RAM) {
203884b7b8e7Sbellard         addr = (tlb_entry->addr_write & TARGET_PAGE_MASK) + tlb_entry->addend;
20391ccde1cbSbellard         if ((addr - start) < length) {
20400f459d16Spbrook             tlb_entry->addr_write = (tlb_entry->addr_write & TARGET_PAGE_MASK) | TLB_NOTDIRTY;
20411ccde1cbSbellard         }
20421ccde1cbSbellard     }
20431ccde1cbSbellard }
20441ccde1cbSbellard 
20455579c7f3Spbrook /* Note: start and end must be within the same ram block.  */
2046c227f099SAnthony Liguori void cpu_physical_memory_reset_dirty(ram_addr_t start, ram_addr_t end,
20470a962c02Sbellard                                      int dirty_flags)
20481ccde1cbSbellard {
20491ccde1cbSbellard     CPUState *env;
20504f2ac237Sbellard     unsigned long length, start1;
2051f7c11b53SYoshiaki Tamura     int i;
20521ccde1cbSbellard 
20531ccde1cbSbellard     start &= TARGET_PAGE_MASK;
20541ccde1cbSbellard     end = TARGET_PAGE_ALIGN(end);
20551ccde1cbSbellard 
20561ccde1cbSbellard     length = end - start;
20571ccde1cbSbellard     if (length == 0)
20581ccde1cbSbellard         return;
2059f7c11b53SYoshiaki Tamura     cpu_physical_memory_mask_dirty_range(start, length, dirty_flags);
2060f23db169Sbellard 
20611ccde1cbSbellard     /* we modify the TLB cache so that the dirty bit will be set again
20621ccde1cbSbellard        when accessing the range */
2063b2e0a138SMichael S. Tsirkin     start1 = (unsigned long)qemu_safe_ram_ptr(start);
2064a57d23e4SStefan Weil     /* Check that we don't span multiple blocks - this would break the
20655579c7f3Spbrook        address comparisons below.  */
2066b2e0a138SMichael S. Tsirkin     if ((unsigned long)qemu_safe_ram_ptr(end - 1) - start1
20675579c7f3Spbrook             != (end - 1) - start) {
20685579c7f3Spbrook         abort();
20695579c7f3Spbrook     }
20705579c7f3Spbrook 
20716a00d601Sbellard     for(env = first_cpu; env != NULL; env = env->next_cpu) {
2072cfde4bd9SIsaku Yamahata         int mmu_idx;
2073cfde4bd9SIsaku Yamahata         for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) {
20741ccde1cbSbellard             for(i = 0; i < CPU_TLB_SIZE; i++)
2075cfde4bd9SIsaku Yamahata                 tlb_reset_dirty_range(&env->tlb_table[mmu_idx][i],
2076cfde4bd9SIsaku Yamahata                                       start1, length);
2077cfde4bd9SIsaku Yamahata         }
20786a00d601Sbellard     }
20791ccde1cbSbellard }
20801ccde1cbSbellard 
208174576198Saliguori int cpu_physical_memory_set_dirty_tracking(int enable)
208274576198Saliguori {
2083f6f3fbcaSMichael S. Tsirkin     int ret = 0;
208474576198Saliguori     in_migration = enable;
2085f6f3fbcaSMichael S. Tsirkin     ret = cpu_notify_migration_log(!!enable);
2086f6f3fbcaSMichael S. Tsirkin     return ret;
208774576198Saliguori }
208874576198Saliguori 
208974576198Saliguori int cpu_physical_memory_get_dirty_tracking(void)
209074576198Saliguori {
209174576198Saliguori     return in_migration;
209274576198Saliguori }
209374576198Saliguori 
2094c227f099SAnthony Liguori int cpu_physical_sync_dirty_bitmap(target_phys_addr_t start_addr,
2095c227f099SAnthony Liguori                                    target_phys_addr_t end_addr)
20962bec46dcSaliguori {
20977b8f3b78SMichael S. Tsirkin     int ret;
2098151f7749SJan Kiszka 
2099f6f3fbcaSMichael S. Tsirkin     ret = cpu_notify_sync_dirty_bitmap(start_addr, end_addr);
2100151f7749SJan Kiszka     return ret;
21012bec46dcSaliguori }
21022bec46dcSaliguori 
2103e5896b12SAnthony PERARD int cpu_physical_log_start(target_phys_addr_t start_addr,
2104e5896b12SAnthony PERARD                            ram_addr_t size)
2105e5896b12SAnthony PERARD {
2106e5896b12SAnthony PERARD     CPUPhysMemoryClient *client;
2107e5896b12SAnthony PERARD     QLIST_FOREACH(client, &memory_client_list, list) {
2108e5896b12SAnthony PERARD         if (client->log_start) {
2109e5896b12SAnthony PERARD             int r = client->log_start(client, start_addr, size);
2110e5896b12SAnthony PERARD             if (r < 0) {
2111e5896b12SAnthony PERARD                 return r;
2112e5896b12SAnthony PERARD             }
2113e5896b12SAnthony PERARD         }
2114e5896b12SAnthony PERARD     }
2115e5896b12SAnthony PERARD     return 0;
2116e5896b12SAnthony PERARD }
2117e5896b12SAnthony PERARD 
2118e5896b12SAnthony PERARD int cpu_physical_log_stop(target_phys_addr_t start_addr,
2119e5896b12SAnthony PERARD                           ram_addr_t size)
2120e5896b12SAnthony PERARD {
2121e5896b12SAnthony PERARD     CPUPhysMemoryClient *client;
2122e5896b12SAnthony PERARD     QLIST_FOREACH(client, &memory_client_list, list) {
2123e5896b12SAnthony PERARD         if (client->log_stop) {
2124e5896b12SAnthony PERARD             int r = client->log_stop(client, start_addr, size);
2125e5896b12SAnthony PERARD             if (r < 0) {
2126e5896b12SAnthony PERARD                 return r;
2127e5896b12SAnthony PERARD             }
2128e5896b12SAnthony PERARD         }
2129e5896b12SAnthony PERARD     }
2130e5896b12SAnthony PERARD     return 0;
2131e5896b12SAnthony PERARD }
2132e5896b12SAnthony PERARD 
21333a7d929eSbellard static inline void tlb_update_dirty(CPUTLBEntry *tlb_entry)
21343a7d929eSbellard {
2135c227f099SAnthony Liguori     ram_addr_t ram_addr;
21365579c7f3Spbrook     void *p;
21373a7d929eSbellard 
213884b7b8e7Sbellard     if ((tlb_entry->addr_write & ~TARGET_PAGE_MASK) == IO_MEM_RAM) {
21395579c7f3Spbrook         p = (void *)(unsigned long)((tlb_entry->addr_write & TARGET_PAGE_MASK)
21405579c7f3Spbrook             + tlb_entry->addend);
2141e890261fSMarcelo Tosatti         ram_addr = qemu_ram_addr_from_host_nofail(p);
21423a7d929eSbellard         if (!cpu_physical_memory_is_dirty(ram_addr)) {
21430f459d16Spbrook             tlb_entry->addr_write |= TLB_NOTDIRTY;
21443a7d929eSbellard         }
21453a7d929eSbellard     }
21463a7d929eSbellard }
21473a7d929eSbellard 
21483a7d929eSbellard /* update the TLB according to the current state of the dirty bits */
21493a7d929eSbellard void cpu_tlb_update_dirty(CPUState *env)
21503a7d929eSbellard {
21513a7d929eSbellard     int i;
2152cfde4bd9SIsaku Yamahata     int mmu_idx;
2153cfde4bd9SIsaku Yamahata     for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) {
21543a7d929eSbellard         for(i = 0; i < CPU_TLB_SIZE; i++)
2155cfde4bd9SIsaku Yamahata             tlb_update_dirty(&env->tlb_table[mmu_idx][i]);
2156cfde4bd9SIsaku Yamahata     }
21573a7d929eSbellard }
21583a7d929eSbellard 
21590f459d16Spbrook static inline void tlb_set_dirty1(CPUTLBEntry *tlb_entry, target_ulong vaddr)
21601ccde1cbSbellard {
21610f459d16Spbrook     if (tlb_entry->addr_write == (vaddr | TLB_NOTDIRTY))
21620f459d16Spbrook         tlb_entry->addr_write = vaddr;
21631ccde1cbSbellard }
21641ccde1cbSbellard 
21650f459d16Spbrook /* update the TLB corresponding to virtual page vaddr
21660f459d16Spbrook    so that it is no longer dirty */
21670f459d16Spbrook static inline void tlb_set_dirty(CPUState *env, target_ulong vaddr)
21681ccde1cbSbellard {
21691ccde1cbSbellard     int i;
2170cfde4bd9SIsaku Yamahata     int mmu_idx;
21711ccde1cbSbellard 
21720f459d16Spbrook     vaddr &= TARGET_PAGE_MASK;
21731ccde1cbSbellard     i = (vaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
2174cfde4bd9SIsaku Yamahata     for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++)
2175cfde4bd9SIsaku Yamahata         tlb_set_dirty1(&env->tlb_table[mmu_idx][i], vaddr);
21761ccde1cbSbellard }
21771ccde1cbSbellard 
2178d4c430a8SPaul Brook /* Our TLB does not support large pages, so remember the area covered by
2179d4c430a8SPaul Brook    large pages and trigger a full TLB flush if these are invalidated.  */
2180d4c430a8SPaul Brook static void tlb_add_large_page(CPUState *env, target_ulong vaddr,
2181d4c430a8SPaul Brook                                target_ulong size)
2182d4c430a8SPaul Brook {
2183d4c430a8SPaul Brook     target_ulong mask = ~(size - 1);
2184d4c430a8SPaul Brook 
2185d4c430a8SPaul Brook     if (env->tlb_flush_addr == (target_ulong)-1) {
2186d4c430a8SPaul Brook         env->tlb_flush_addr = vaddr & mask;
2187d4c430a8SPaul Brook         env->tlb_flush_mask = mask;
2188d4c430a8SPaul Brook         return;
2189d4c430a8SPaul Brook     }
2190d4c430a8SPaul Brook     /* Extend the existing region to include the new page.
2191d4c430a8SPaul Brook        This is a compromise between unnecessary flushes and the cost
2192d4c430a8SPaul Brook        of maintaining a fully variable-size TLB.  */
2193d4c430a8SPaul Brook     mask &= env->tlb_flush_mask;
2194d4c430a8SPaul Brook     while (((env->tlb_flush_addr ^ vaddr) & mask) != 0) {
2195d4c430a8SPaul Brook         mask <<= 1;
2196d4c430a8SPaul Brook     }
2197d4c430a8SPaul Brook     env->tlb_flush_addr &= mask;
2198d4c430a8SPaul Brook     env->tlb_flush_mask = mask;
2199d4c430a8SPaul Brook }
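
/* Worked example of the widening loop above, on a 32-bit target: with
   an existing region addr=0x40000000 mask=0xffff0000 (one 64 KB page)
   and a new 64 KB page at vaddr=0x40030000, the XOR of the addresses
   is 0x00030000, so the mask is shifted left until it clears those
   bits, giving addr=0x40000000 mask=0xfffc0000: a single 256 KB region
   now covers both, at the price of a full flush if any page inside it
   is later invalidated. */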
2200d4c430a8SPaul Brook 
2201d4c430a8SPaul Brook /* Add a new TLB entry. At most one entry for a given virtual address
2202d4c430a8SPaul Brook    is permitted. Only a single TARGET_PAGE_SIZE region is mapped; the
2203d4c430a8SPaul Brook    supplied size is only used by tlb_flush_page.  */
2204d4c430a8SPaul Brook void tlb_set_page(CPUState *env, target_ulong vaddr,
2205c227f099SAnthony Liguori                   target_phys_addr_t paddr, int prot,
2206d4c430a8SPaul Brook                   int mmu_idx, target_ulong size)
22079fa3e853Sbellard {
220892e873b9Sbellard     PhysPageDesc *p;
22094f2ac237Sbellard     unsigned long pd;
22109fa3e853Sbellard     unsigned int index;
22114f2ac237Sbellard     target_ulong address;
22120f459d16Spbrook     target_ulong code_address;
2213355b1943SPaul Brook     unsigned long addend;
221484b7b8e7Sbellard     CPUTLBEntry *te;
2215a1d1bb31Saliguori     CPUWatchpoint *wp;
2216c227f099SAnthony Liguori     target_phys_addr_t iotlb;
22179fa3e853Sbellard 
2218d4c430a8SPaul Brook     assert(size >= TARGET_PAGE_SIZE);
2219d4c430a8SPaul Brook     if (size != TARGET_PAGE_SIZE) {
2220d4c430a8SPaul Brook         tlb_add_large_page(env, vaddr, size);
2221d4c430a8SPaul Brook     }
222292e873b9Sbellard     p = phys_page_find(paddr >> TARGET_PAGE_BITS);
22239fa3e853Sbellard     if (!p) {
22249fa3e853Sbellard         pd = IO_MEM_UNASSIGNED;
22259fa3e853Sbellard     } else {
22269fa3e853Sbellard         pd = p->phys_offset;
22279fa3e853Sbellard     }
22289fa3e853Sbellard #if defined(DEBUG_TLB)
22297fd3f494SStefan Weil     printf("tlb_set_page: vaddr=" TARGET_FMT_lx " paddr=0x" TARGET_FMT_plx
22307fd3f494SStefan Weil            " prot=%x idx=%d pd=0x%08lx\n",
22317fd3f494SStefan Weil            vaddr, paddr, prot, mmu_idx, pd);
22329fa3e853Sbellard #endif
22339fa3e853Sbellard 
22349fa3e853Sbellard     address = vaddr;
22350f459d16Spbrook     if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM && !(pd & IO_MEM_ROMD)) {
22360f459d16Spbrook         /* IO memory case (romd handled later) */
22370f459d16Spbrook         address |= TLB_MMIO;
22380f459d16Spbrook     }
22395579c7f3Spbrook     addend = (unsigned long)qemu_get_ram_ptr(pd & TARGET_PAGE_MASK);
22400f459d16Spbrook     if ((pd & ~TARGET_PAGE_MASK) <= IO_MEM_ROM) {
22410f459d16Spbrook         /* Normal RAM.  */
22420f459d16Spbrook         iotlb = pd & TARGET_PAGE_MASK;
22430f459d16Spbrook         if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_RAM)
22440f459d16Spbrook             iotlb |= IO_MEM_NOTDIRTY;
22450f459d16Spbrook         else
22460f459d16Spbrook             iotlb |= IO_MEM_ROM;
22470f459d16Spbrook     } else {
2248ccbb4d44SStuart Brady         /* IO handlers are currently passed a physical address.
22490f459d16Spbrook            It would be nice to pass an offset from the base address
22500f459d16Spbrook            of that region.  This would avoid having to special case RAM,
22510f459d16Spbrook            and avoid full address decoding in every device.
22520f459d16Spbrook            We can't use the high bits of pd for this because
22530f459d16Spbrook            IO_MEM_ROMD uses these as a ram address.  */
22548da3ff18Spbrook         iotlb = (pd & ~TARGET_PAGE_MASK);
22558da3ff18Spbrook         if (p) {
22568da3ff18Spbrook             iotlb += p->region_offset;
22578da3ff18Spbrook         } else {
22588da3ff18Spbrook             iotlb += paddr;
22598da3ff18Spbrook         }
22609fa3e853Sbellard     }
22619fa3e853Sbellard 
22620f459d16Spbrook     code_address = address;
22636658ffb8Spbrook     /* Make accesses to pages with watchpoints go via the
22646658ffb8Spbrook        watchpoint trap routines.  */
226572cf2d4fSBlue Swirl     QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
2266a1d1bb31Saliguori         if (vaddr == (wp->vaddr & TARGET_PAGE_MASK)) {
2267bf298f83SJun Koi             /* Avoid trapping reads of pages with a write breakpoint. */
2268bf298f83SJun Koi             if ((prot & PAGE_WRITE) || (wp->flags & BP_MEM_READ)) {
22690f459d16Spbrook                 iotlb = io_mem_watch + paddr;
22700f459d16Spbrook                 address |= TLB_MMIO;
2271bf298f83SJun Koi                 break;
2272bf298f83SJun Koi             }
22736658ffb8Spbrook         }
22746658ffb8Spbrook     }
22756658ffb8Spbrook 
227690f18422Sbellard     index = (vaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
22770f459d16Spbrook     env->iotlb[mmu_idx][index] = iotlb - vaddr;
22786ebbf390Sj_mayer     te = &env->tlb_table[mmu_idx][index];
22790f459d16Spbrook     te->addend = addend - vaddr;
228067b915a5Sbellard     if (prot & PAGE_READ) {
228184b7b8e7Sbellard         te->addr_read = address;
22829fa3e853Sbellard     } else {
228384b7b8e7Sbellard         te->addr_read = -1;
228484b7b8e7Sbellard     }
22855c751e99Sedgar_igl 
228684b7b8e7Sbellard     if (prot & PAGE_EXEC) {
22870f459d16Spbrook         te->addr_code = code_address;
228884b7b8e7Sbellard     } else {
228984b7b8e7Sbellard         te->addr_code = -1;
22909fa3e853Sbellard     }
229167b915a5Sbellard     if (prot & PAGE_WRITE) {
2292856074ecSbellard         if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_ROM ||
2293856074ecSbellard             (pd & IO_MEM_ROMD)) {
22940f459d16Spbrook             /* Write access calls the I/O callback.  */
22950f459d16Spbrook             te->addr_write = address | TLB_MMIO;
22963a7d929eSbellard         } else if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_RAM &&
22971ccde1cbSbellard                    !cpu_physical_memory_is_dirty(pd)) {
22980f459d16Spbrook             te->addr_write = address | TLB_NOTDIRTY;
22999fa3e853Sbellard         } else {
230084b7b8e7Sbellard             te->addr_write = address;
23019fa3e853Sbellard         }
23029fa3e853Sbellard     } else {
230384b7b8e7Sbellard         te->addr_write = -1;
23049fa3e853Sbellard     }
23059fa3e853Sbellard }
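/* Illustrative sketch, not from the QEMU tree: how a load consumes the
   entry that tlb_set_page() just filled in.  The real fast path lives in
   the generated softmmu templates; this shows only the tag compare plus
   the addend arithmetic for a one-byte read. */
#if 0
static inline uint8_t sketch_ldub(CPUState *env, target_ulong vaddr, int mmu_idx)
{
    int index = (vaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
    CPUTLBEntry *te = &env->tlb_table[mmu_idx][index];

    if (te->addr_read == (vaddr & TARGET_PAGE_MASK)) {
        /* Hit: addend turns the guest virtual address into a host
           pointer with a single add. */
        return *(uint8_t *)(unsigned long)(vaddr + te->addend);
    }
    /* Miss, or TLB_MMIO/TLB_NOTDIRTY set in the low bits made the tag
       compare fail: take the slow path, which may call tlb_set_page()
       and retry.  (Slow path omitted.) */
    return 0;
}
#endif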
23069fa3e853Sbellard 
23070124311eSbellard #else
23080124311eSbellard 
2309ee8b7021Sbellard void tlb_flush(CPUState *env, int flush_global)
23100124311eSbellard {
23110124311eSbellard }
23120124311eSbellard 
23132e12669aSbellard void tlb_flush_page(CPUState *env, target_ulong addr)
23140124311eSbellard {
23150124311eSbellard }
23160124311eSbellard 
2317edf8e2afSMika Westerberg /*
2318edf8e2afSMika Westerberg  * Walks guest process memory "regions" one by one
2319edf8e2afSMika Westerberg  * and calls callback function 'fn' for each region.
2320edf8e2afSMika Westerberg  */
23215cd2c5b6SRichard Henderson 
23225cd2c5b6SRichard Henderson struct walk_memory_regions_data
232333417e70Sbellard {
23245cd2c5b6SRichard Henderson     walk_memory_regions_fn fn;
23255cd2c5b6SRichard Henderson     void *priv;
23265cd2c5b6SRichard Henderson     unsigned long start;
23275cd2c5b6SRichard Henderson     int prot;
23285cd2c5b6SRichard Henderson };
23299fa3e853Sbellard 
23305cd2c5b6SRichard Henderson static int walk_memory_regions_end(struct walk_memory_regions_data *data,
2331b480d9b7SPaul Brook                                    abi_ulong end, int new_prot)
23325cd2c5b6SRichard Henderson {
23335cd2c5b6SRichard Henderson     if (data->start != -1ul) {
23345cd2c5b6SRichard Henderson         int rc = data->fn(data->priv, data->start, end, data->prot);
23355cd2c5b6SRichard Henderson         if (rc != 0) {
23365cd2c5b6SRichard Henderson             return rc;
23375cd2c5b6SRichard Henderson         }
23385cd2c5b6SRichard Henderson     }
2339edf8e2afSMika Westerberg 
23405cd2c5b6SRichard Henderson     data->start = (new_prot ? end : -1ul);
23415cd2c5b6SRichard Henderson     data->prot = new_prot;
23425cd2c5b6SRichard Henderson 
23435cd2c5b6SRichard Henderson     return 0;
234433417e70Sbellard }
23455cd2c5b6SRichard Henderson 
23465cd2c5b6SRichard Henderson static int walk_memory_regions_1(struct walk_memory_regions_data *data,
2347b480d9b7SPaul Brook                                  abi_ulong base, int level, void **lp)
23485cd2c5b6SRichard Henderson {
2349b480d9b7SPaul Brook     abi_ulong pa;
23505cd2c5b6SRichard Henderson     int i, rc;
23515cd2c5b6SRichard Henderson 
23525cd2c5b6SRichard Henderson     if (*lp == NULL) {
23535cd2c5b6SRichard Henderson         return walk_memory_regions_end(data, base, 0);
23549fa3e853Sbellard     }
23555cd2c5b6SRichard Henderson 
23565cd2c5b6SRichard Henderson     if (level == 0) {
23575cd2c5b6SRichard Henderson         PageDesc *pd = *lp;
23587296abacSPaul Brook         for (i = 0; i < L2_SIZE; ++i) {
23595cd2c5b6SRichard Henderson             int prot = pd[i].flags;
23605cd2c5b6SRichard Henderson 
23615cd2c5b6SRichard Henderson             pa = base | (i << TARGET_PAGE_BITS);
23625cd2c5b6SRichard Henderson             if (prot != data->prot) {
23635cd2c5b6SRichard Henderson                 rc = walk_memory_regions_end(data, pa, prot);
23645cd2c5b6SRichard Henderson                 if (rc != 0) {
23655cd2c5b6SRichard Henderson                     return rc;
23669fa3e853Sbellard                 }
23679fa3e853Sbellard             }
23685cd2c5b6SRichard Henderson         }
23695cd2c5b6SRichard Henderson     } else {
23705cd2c5b6SRichard Henderson         void **pp = *lp;
23717296abacSPaul Brook         for (i = 0; i < L2_SIZE; ++i) {
2372b480d9b7SPaul Brook             pa = base | ((abi_ulong)i <<
2373b480d9b7SPaul Brook                 (TARGET_PAGE_BITS + L2_BITS * level));
23745cd2c5b6SRichard Henderson             rc = walk_memory_regions_1(data, pa, level - 1, pp + i);
23755cd2c5b6SRichard Henderson             if (rc != 0) {
23765cd2c5b6SRichard Henderson                 return rc;
23775cd2c5b6SRichard Henderson             }
23785cd2c5b6SRichard Henderson         }
23795cd2c5b6SRichard Henderson     }
23805cd2c5b6SRichard Henderson 
23815cd2c5b6SRichard Henderson     return 0;
23825cd2c5b6SRichard Henderson }
23835cd2c5b6SRichard Henderson 
23845cd2c5b6SRichard Henderson int walk_memory_regions(void *priv, walk_memory_regions_fn fn)
23855cd2c5b6SRichard Henderson {
23865cd2c5b6SRichard Henderson     struct walk_memory_regions_data data;
23875cd2c5b6SRichard Henderson     unsigned long i;
23885cd2c5b6SRichard Henderson 
23895cd2c5b6SRichard Henderson     data.fn = fn;
23905cd2c5b6SRichard Henderson     data.priv = priv;
23915cd2c5b6SRichard Henderson     data.start = -1ul;
23925cd2c5b6SRichard Henderson     data.prot = 0;
23935cd2c5b6SRichard Henderson 
23945cd2c5b6SRichard Henderson     for (i = 0; i < V_L1_SIZE; i++) {
2395b480d9b7SPaul Brook         int rc = walk_memory_regions_1(&data, (abi_ulong)i << V_L1_SHIFT,
23965cd2c5b6SRichard Henderson                                        V_L1_SHIFT / L2_BITS - 1, l1_map + i);
23975cd2c5b6SRichard Henderson         if (rc != 0) {
23985cd2c5b6SRichard Henderson             return rc;
23995cd2c5b6SRichard Henderson         }
24005cd2c5b6SRichard Henderson     }
24015cd2c5b6SRichard Henderson 
24025cd2c5b6SRichard Henderson     return walk_memory_regions_end(&data, 0, 0);
2403edf8e2afSMika Westerberg }
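/* Illustrative sketch, not from the QEMU tree: a minimal
   walk_memory_regions() callback that counts executable regions;
   dump_region() below is the in-tree example of the same pattern. */
#if 0
static int sketch_count_exec(void *priv, abi_ulong start,
                             abi_ulong end, unsigned long prot)
{
    if (prot & PAGE_EXEC) {
        (*(int *)priv)++;
    }
    return 0;   /* a non-zero return would abort the walk */
}

/* Usage:  int n = 0;  walk_memory_regions(&n, sketch_count_exec);  */
#endif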
2404edf8e2afSMika Westerberg 
2405b480d9b7SPaul Brook static int dump_region(void *priv, abi_ulong start,
2406b480d9b7SPaul Brook     abi_ulong end, unsigned long prot)
2407edf8e2afSMika Westerberg {
2408edf8e2afSMika Westerberg     FILE *f = (FILE *)priv;
2409edf8e2afSMika Westerberg 
2410b480d9b7SPaul Brook     (void) fprintf(f, TARGET_ABI_FMT_lx"-"TARGET_ABI_FMT_lx
2411b480d9b7SPaul Brook         " "TARGET_ABI_FMT_lx" %c%c%c\n",
2412edf8e2afSMika Westerberg         start, end, end - start,
2413edf8e2afSMika Westerberg         ((prot & PAGE_READ) ? 'r' : '-'),
2414edf8e2afSMika Westerberg         ((prot & PAGE_WRITE) ? 'w' : '-'),
2415edf8e2afSMika Westerberg         ((prot & PAGE_EXEC) ? 'x' : '-'));
2416edf8e2afSMika Westerberg 
2417edf8e2afSMika Westerberg     return (0);
2418edf8e2afSMika Westerberg }
2419edf8e2afSMika Westerberg 
2420edf8e2afSMika Westerberg /* dump memory mappings */
2421edf8e2afSMika Westerberg void page_dump(FILE *f)
2422edf8e2afSMika Westerberg {
2423edf8e2afSMika Westerberg     (void) fprintf(f, "%-8s %-8s %-8s %s\n",
2424edf8e2afSMika Westerberg             "start", "end", "size", "prot");
2425edf8e2afSMika Westerberg     walk_memory_regions(f, dump_region);
24269fa3e853Sbellard }
24279fa3e853Sbellard 
242853a5960aSpbrook int page_get_flags(target_ulong address)
24299fa3e853Sbellard {
24309fa3e853Sbellard     PageDesc *p;
24319fa3e853Sbellard 
24329fa3e853Sbellard     p = page_find(address >> TARGET_PAGE_BITS);
24339fa3e853Sbellard     if (!p)
24349fa3e853Sbellard         return 0;
24359fa3e853Sbellard     return p->flags;
24369fa3e853Sbellard }
24379fa3e853Sbellard 
2438376a7909SRichard Henderson /* Modify the flags of a page and invalidate the code if necessary.
2439376a7909SRichard Henderson    The flag PAGE_WRITE_ORG is positioned automatically depending
2440376a7909SRichard Henderson    on PAGE_WRITE.  The mmap_lock should already be held.  */
244153a5960aSpbrook void page_set_flags(target_ulong start, target_ulong end, int flags)
24429fa3e853Sbellard {
2443376a7909SRichard Henderson     target_ulong addr, len;
24449fa3e853Sbellard 
2445376a7909SRichard Henderson     /* This function should never be called with addresses outside the
2446376a7909SRichard Henderson        guest address space.  If this assert fires, it probably indicates
2447376a7909SRichard Henderson        a missing call to h2g_valid.  */
2448b480d9b7SPaul Brook #if TARGET_ABI_BITS > L1_MAP_ADDR_SPACE_BITS
2449b480d9b7SPaul Brook     assert(end < ((abi_ulong)1 << L1_MAP_ADDR_SPACE_BITS));
2450376a7909SRichard Henderson #endif
2451376a7909SRichard Henderson     assert(start < end);
2452376a7909SRichard Henderson 
24539fa3e853Sbellard     start = start & TARGET_PAGE_MASK;
24549fa3e853Sbellard     end = TARGET_PAGE_ALIGN(end);
2455376a7909SRichard Henderson 
2456376a7909SRichard Henderson     if (flags & PAGE_WRITE) {
24579fa3e853Sbellard         flags |= PAGE_WRITE_ORG;
2458376a7909SRichard Henderson     }
2459376a7909SRichard Henderson 
2460376a7909SRichard Henderson     for (addr = start, len = end - start;
2461376a7909SRichard Henderson          len != 0;
2462376a7909SRichard Henderson          len -= TARGET_PAGE_SIZE, addr += TARGET_PAGE_SIZE) {
2463376a7909SRichard Henderson         PageDesc *p = page_find_alloc(addr >> TARGET_PAGE_BITS, 1);
2464376a7909SRichard Henderson 
2465376a7909SRichard Henderson         /* If the write protection bit is set, then we invalidate
2466376a7909SRichard Henderson            the code inside.  */
24679fa3e853Sbellard         if (!(p->flags & PAGE_WRITE) &&
24689fa3e853Sbellard             (flags & PAGE_WRITE) &&
24699fa3e853Sbellard             p->first_tb) {
2470d720b93dSbellard             tb_invalidate_phys_page(addr, 0, NULL);
24719fa3e853Sbellard         }
24729fa3e853Sbellard         p->flags = flags;
24739fa3e853Sbellard     }
24749fa3e853Sbellard }
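/* Illustrative sketch, not from the QEMU tree: how user-mode mmap
   emulation might publish a fresh guest mapping via page_set_flags().
   guest_start/guest_len are hypothetical; the real caller is
   linux-user's target_mmap(), which holds mmap_lock as required. */
#if 0
static void sketch_publish_mapping(target_ulong guest_start,
                                   target_ulong guest_len)
{
    page_set_flags(guest_start, guest_start + guest_len,
                   PAGE_VALID | PAGE_READ | PAGE_WRITE);
}
#endif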
24759fa3e853Sbellard 
24763d97b40bSths int page_check_range(target_ulong start, target_ulong len, int flags)
24773d97b40bSths {
24783d97b40bSths     PageDesc *p;
24793d97b40bSths     target_ulong end;
24803d97b40bSths     target_ulong addr;
24813d97b40bSths 
2482376a7909SRichard Henderson     /* This function should never be called with addresses outside the
2483376a7909SRichard Henderson        guest address space.  If this assert fires, it probably indicates
2484376a7909SRichard Henderson        a missing call to h2g_valid.  */
2485338e9e6cSBlue Swirl #if TARGET_ABI_BITS > L1_MAP_ADDR_SPACE_BITS
2486338e9e6cSBlue Swirl     assert(start < ((abi_ulong)1 << L1_MAP_ADDR_SPACE_BITS));
2487376a7909SRichard Henderson #endif
2488376a7909SRichard Henderson 
24893e0650a9SRichard Henderson     if (len == 0) {
24903e0650a9SRichard Henderson         return 0;
24913e0650a9SRichard Henderson     }
2492376a7909SRichard Henderson     if (start + len - 1 < start) {
2493376a7909SRichard Henderson         /* We've wrapped around.  */
249455f280c9Sbalrog         return -1;
2495376a7909SRichard Henderson     }
249655f280c9Sbalrog 
24973d97b40bSths     end = TARGET_PAGE_ALIGN(start + len); /* must do before we lose bits in the next step */
24983d97b40bSths     start = start & TARGET_PAGE_MASK;
24993d97b40bSths 
2500376a7909SRichard Henderson     for (addr = start, len = end - start;
2501376a7909SRichard Henderson          len != 0;
2502376a7909SRichard Henderson          len -= TARGET_PAGE_SIZE, addr += TARGET_PAGE_SIZE) {
25033d97b40bSths         p = page_find(addr >> TARGET_PAGE_BITS);
25043d97b40bSths         if( !p )
25043d97b40bSths         if (!p)
25053d97b40bSths             return -1;
25063d97b40bSths         if (!(p->flags & PAGE_VALID))
25083d97b40bSths 
2509dae3270cSbellard         if ((flags & PAGE_READ) && !(p->flags & PAGE_READ))
25103d97b40bSths             return -1;
2511dae3270cSbellard         if (flags & PAGE_WRITE) {
2512dae3270cSbellard             if (!(p->flags & PAGE_WRITE_ORG))
25133d97b40bSths                 return -1;
2514dae3270cSbellard             /* unprotect the page if it was put read-only because it
2515dae3270cSbellard                contains translated code */
2516dae3270cSbellard             if (!(p->flags & PAGE_WRITE)) {
2517dae3270cSbellard                 if (!page_unprotect(addr, 0, NULL))
2518dae3270cSbellard                     return -1;
2519dae3270cSbellard             }
2521dae3270cSbellard         }
25223d97b40bSths     }
25233d97b40bSths     return 0;
25243d97b40bSths }
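/* Illustrative sketch, not from the QEMU tree: the typical
   syscall-emulation use of page_check_range() -- verify guest access
   rights before touching a guest buffer, as linux-user's access_ok()
   does.  'type' is PAGE_READ and/or PAGE_WRITE. */
#if 0
static int sketch_access_ok(int type, target_ulong addr, target_ulong size)
{
    return page_check_range(addr, size, type) == 0;
}
#endif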
25253d97b40bSths 
25269fa3e853Sbellard /* called from signal handler: invalidate the code and unprotect the
2527ccbb4d44SStuart Brady    page. Return TRUE if the fault was successfully handled. */
252853a5960aSpbrook int page_unprotect(target_ulong address, unsigned long pc, void *puc)
25299fa3e853Sbellard {
253045d679d6SAurelien Jarno     unsigned int prot;
253145d679d6SAurelien Jarno     PageDesc *p;
253253a5960aSpbrook     target_ulong host_start, host_end, addr;
25339fa3e853Sbellard 
2534c8a706feSpbrook     /* Technically this isn't safe inside a signal handler.  However we
2535c8a706feSpbrook        know this only ever happens in a synchronous SEGV handler, so in
2536c8a706feSpbrook        practice it seems to be ok.  */
2537c8a706feSpbrook     mmap_lock();
2538c8a706feSpbrook 
253945d679d6SAurelien Jarno     p = page_find(address >> TARGET_PAGE_BITS);
254045d679d6SAurelien Jarno     if (!p) {
2541c8a706feSpbrook         mmap_unlock();
25429fa3e853Sbellard         return 0;
2543c8a706feSpbrook     }
254445d679d6SAurelien Jarno 
25459fa3e853Sbellard     /* if the page was really writable, then we change its
25469fa3e853Sbellard        protection back to writable */
254745d679d6SAurelien Jarno     if ((p->flags & PAGE_WRITE_ORG) && !(p->flags & PAGE_WRITE)) {
254845d679d6SAurelien Jarno         host_start = address & qemu_host_page_mask;
254945d679d6SAurelien Jarno         host_end = host_start + qemu_host_page_size;
255045d679d6SAurelien Jarno 
255145d679d6SAurelien Jarno         prot = 0;
255245d679d6SAurelien Jarno         for (addr = host_start ; addr < host_end ; addr += TARGET_PAGE_SIZE) {
255345d679d6SAurelien Jarno             p = page_find(addr >> TARGET_PAGE_BITS);
255445d679d6SAurelien Jarno             p->flags |= PAGE_WRITE;
255545d679d6SAurelien Jarno             prot |= p->flags;
255645d679d6SAurelien Jarno 
25579fa3e853Sbellard             /* and since the content will be modified, we must invalidate
25589fa3e853Sbellard                the corresponding translated code. */
255945d679d6SAurelien Jarno             tb_invalidate_phys_page(addr, pc, puc);
25609fa3e853Sbellard #ifdef DEBUG_TB_CHECK
256145d679d6SAurelien Jarno             tb_invalidate_check(addr);
25629fa3e853Sbellard #endif
256345d679d6SAurelien Jarno         }
256445d679d6SAurelien Jarno         mprotect((void *)g2h(host_start), qemu_host_page_size,
256545d679d6SAurelien Jarno                  prot & PAGE_BITS);
256645d679d6SAurelien Jarno 
2567c8a706feSpbrook         mmap_unlock();
25689fa3e853Sbellard         return 1;
25699fa3e853Sbellard     }
2570c8a706feSpbrook     mmap_unlock();
25719fa3e853Sbellard     return 0;
25729fa3e853Sbellard }
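/* Illustrative sketch, not from the QEMU tree: where page_unprotect()
   sits in the host SIGSEGV path for user-mode emulation.  The real
   logic lives in handle_cpu_signal(); names and arguments here are
   simplified (the real caller also passes the signal context). */
#if 0
static int sketch_handle_write_fault(unsigned long host_addr, unsigned long pc)
{
    /* Was this a write to a page we made read-only to guard translated
       code?  If so, the TBs were flushed and PROT_WRITE restored. */
    if (page_unprotect(h2g(host_addr), pc, NULL)) {
        return 1;   /* handled: restart the faulting instruction */
    }
    return 0;       /* genuine fault: deliver a guest SIGSEGV */
}
#endif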
25739fa3e853Sbellard 
25746a00d601Sbellard static inline void tlb_set_dirty(CPUState *env,
25756a00d601Sbellard                                  unsigned long addr, target_ulong vaddr)
25761ccde1cbSbellard {
25771ccde1cbSbellard }
25789fa3e853Sbellard #endif /* defined(CONFIG_USER_ONLY) */
257933417e70Sbellard 
2580e2eef170Spbrook #if !defined(CONFIG_USER_ONLY)
25818da3ff18Spbrook 
2582c04b2b78SPaul Brook #define SUBPAGE_IDX(addr) ((addr) & ~TARGET_PAGE_MASK)
2583c04b2b78SPaul Brook typedef struct subpage_t {
2584c04b2b78SPaul Brook     target_phys_addr_t base;
2585f6405247SRichard Henderson     ram_addr_t sub_io_index[TARGET_PAGE_SIZE];
2586f6405247SRichard Henderson     ram_addr_t region_offset[TARGET_PAGE_SIZE];
2587c04b2b78SPaul Brook } subpage_t;
2588c04b2b78SPaul Brook 
2589c227f099SAnthony Liguori static int subpage_register (subpage_t *mmio, uint32_t start, uint32_t end,
2590c227f099SAnthony Liguori                              ram_addr_t memory, ram_addr_t region_offset);
2591f6405247SRichard Henderson static subpage_t *subpage_init (target_phys_addr_t base, ram_addr_t *phys,
2592f6405247SRichard Henderson                                 ram_addr_t orig_memory,
2593f6405247SRichard Henderson                                 ram_addr_t region_offset);
2594db7b5426Sblueswir1 #define CHECK_SUBPAGE(addr, start_addr, start_addr2, end_addr, end_addr2, \
2595db7b5426Sblueswir1                       need_subpage)                                     \
2596db7b5426Sblueswir1     do {                                                                \
2597db7b5426Sblueswir1         if (addr > start_addr)                                          \
2598db7b5426Sblueswir1             start_addr2 = 0;                                            \
2599db7b5426Sblueswir1         else {                                                          \
2600db7b5426Sblueswir1             start_addr2 = start_addr & ~TARGET_PAGE_MASK;               \
2601db7b5426Sblueswir1             if (start_addr2 > 0)                                        \
2602db7b5426Sblueswir1                 need_subpage = 1;                                       \
2603db7b5426Sblueswir1         }                                                               \
2604db7b5426Sblueswir1                                                                         \
260549e9fba2Sblueswir1         if ((start_addr + orig_size) - addr >= TARGET_PAGE_SIZE)        \
2606db7b5426Sblueswir1             end_addr2 = TARGET_PAGE_SIZE - 1;                           \
2607db7b5426Sblueswir1         else {                                                          \
2608db7b5426Sblueswir1             end_addr2 = (start_addr + orig_size - 1) & ~TARGET_PAGE_MASK; \
2609db7b5426Sblueswir1             if (end_addr2 < TARGET_PAGE_SIZE - 1)                       \
2610db7b5426Sblueswir1                 need_subpage = 1;                                       \
2611db7b5426Sblueswir1         }                                                               \
2612db7b5426Sblueswir1     } while (0)
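/* Illustrative sketch, not from the QEMU tree: CHECK_SUBPAGE() traced
   for one unaligned registration with hypothetical 4 KiB pages.
   Registering 0x1800 bytes at start_addr 0x1400 touches two pages and
   needs a subpage on both.  Compile standalone. */
#if 0
#include <stdio.h>

int main(void)
{
    const unsigned long page_size = 0x1000, page_mask = ~(page_size - 1);
    unsigned long start_addr = 0x1400, orig_size = 0x1800;
    unsigned long size = (orig_size + page_size - 1) & page_mask;
    unsigned long addr, end_addr = start_addr + size;

    for (addr = start_addr; addr != end_addr; addr += page_size) {
        unsigned long start_addr2, end_addr2;
        int need_subpage = 0;

        /* Body of CHECK_SUBPAGE(), inlined. */
        if (addr > start_addr) {
            start_addr2 = 0;
        } else {
            start_addr2 = start_addr & ~page_mask;
            if (start_addr2 > 0)
                need_subpage = 1;
        }
        if ((start_addr + orig_size) - addr >= page_size) {
            end_addr2 = page_size - 1;
        } else {
            end_addr2 = (start_addr + orig_size - 1) & ~page_mask;
            if (end_addr2 < page_size - 1)
                need_subpage = 1;
        }
        /* Prints: page 0x1000: [0x400..0xfff]  then  page 0x2000: [0x0..0xbff] */
        printf("page 0x%lx: [0x%lx..0x%lx] subpage=%d\n",
               addr & page_mask, start_addr2, end_addr2, need_subpage);
    }
    return 0;
}
#endif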
2613db7b5426Sblueswir1 
26148f2498f9SMichael S. Tsirkin /* register physical memory.
26158f2498f9SMichael S. Tsirkin    For RAM, 'size' must be a multiple of the target page size.
26168f2498f9SMichael S. Tsirkin    If (phys_offset & ~TARGET_PAGE_MASK) != 0, then it is an
26178da3ff18Spbrook    io memory page.  The address used when calling the IO function is
26188da3ff18Spbrook    the offset from the start of the region, plus region_offset.  Both
2619ccbb4d44SStuart Brady    start_addr and region_offset are rounded down to a page boundary
26208da3ff18Spbrook    before calculating this offset.  This should not be a problem unless
26218da3ff18Spbrook    the low bits of start_addr and region_offset differ.  */
26220fd542fbSMichael S. Tsirkin void cpu_register_physical_memory_log(target_phys_addr_t start_addr,
2623c227f099SAnthony Liguori                                          ram_addr_t size,
2624c227f099SAnthony Liguori                                          ram_addr_t phys_offset,
26250fd542fbSMichael S. Tsirkin                                          ram_addr_t region_offset,
26260fd542fbSMichael S. Tsirkin                                          bool log_dirty)
262733417e70Sbellard {
2628c227f099SAnthony Liguori     target_phys_addr_t addr, end_addr;
262992e873b9Sbellard     PhysPageDesc *p;
26309d42037bSbellard     CPUState *env;
2631c227f099SAnthony Liguori     ram_addr_t orig_size = size;
2632f6405247SRichard Henderson     subpage_t *subpage;
263333417e70Sbellard 
26343b8e6a2dSEdgar E. Iglesias     assert(size);
26350fd542fbSMichael S. Tsirkin     cpu_notify_set_memory(start_addr, size, phys_offset, log_dirty);
2636f6f3fbcaSMichael S. Tsirkin 
263767c4d23cSpbrook     if (phys_offset == IO_MEM_UNASSIGNED) {
263867c4d23cSpbrook         region_offset = start_addr;
263967c4d23cSpbrook     }
26408da3ff18Spbrook     region_offset &= TARGET_PAGE_MASK;
26415fd386f6Sbellard     size = (size + TARGET_PAGE_SIZE - 1) & TARGET_PAGE_MASK;
2642c227f099SAnthony Liguori     end_addr = start_addr + (target_phys_addr_t)size;
26433b8e6a2dSEdgar E. Iglesias 
26443b8e6a2dSEdgar E. Iglesias     addr = start_addr;
26453b8e6a2dSEdgar E. Iglesias     do {
2646db7b5426Sblueswir1         p = phys_page_find(addr >> TARGET_PAGE_BITS);
2647db7b5426Sblueswir1         if (p && p->phys_offset != IO_MEM_UNASSIGNED) {
2648c227f099SAnthony Liguori             ram_addr_t orig_memory = p->phys_offset;
2649c227f099SAnthony Liguori             target_phys_addr_t start_addr2, end_addr2;
2650db7b5426Sblueswir1             int need_subpage = 0;
2651db7b5426Sblueswir1 
2652db7b5426Sblueswir1             CHECK_SUBPAGE(addr, start_addr, start_addr2, end_addr, end_addr2,
2653db7b5426Sblueswir1                           need_subpage);
2654f6405247SRichard Henderson             if (need_subpage) {
2655db7b5426Sblueswir1                 if (!(orig_memory & IO_MEM_SUBPAGE)) {
2656db7b5426Sblueswir1                     subpage = subpage_init((addr & TARGET_PAGE_MASK),
26578da3ff18Spbrook                                            &p->phys_offset, orig_memory,
26588da3ff18Spbrook                                            p->region_offset);
2659db7b5426Sblueswir1                 } else {
2660db7b5426Sblueswir1                     subpage = io_mem_opaque[(orig_memory & ~TARGET_PAGE_MASK)
2661db7b5426Sblueswir1                                             >> IO_MEM_SHIFT];
2662db7b5426Sblueswir1                 }
26638da3ff18Spbrook                 subpage_register(subpage, start_addr2, end_addr2, phys_offset,
26648da3ff18Spbrook                                  region_offset);
26658da3ff18Spbrook                 p->region_offset = 0;
2666db7b5426Sblueswir1             } else {
2667db7b5426Sblueswir1                 p->phys_offset = phys_offset;
2668db7b5426Sblueswir1                 if ((phys_offset & ~TARGET_PAGE_MASK) <= IO_MEM_ROM ||
2669db7b5426Sblueswir1                     (phys_offset & IO_MEM_ROMD))
2670db7b5426Sblueswir1                     phys_offset += TARGET_PAGE_SIZE;
2671db7b5426Sblueswir1             }
2672db7b5426Sblueswir1         } else {
2673108c49b8Sbellard             p = phys_page_find_alloc(addr >> TARGET_PAGE_BITS, 1);
26749fa3e853Sbellard             p->phys_offset = phys_offset;
26758da3ff18Spbrook             p->region_offset = region_offset;
26762a4188a3Sbellard             if ((phys_offset & ~TARGET_PAGE_MASK) <= IO_MEM_ROM ||
26778da3ff18Spbrook                 (phys_offset & IO_MEM_ROMD)) {
267833417e70Sbellard                 phys_offset += TARGET_PAGE_SIZE;
26798da3ff18Spbrook             } else {
2680c227f099SAnthony Liguori                 target_phys_addr_t start_addr2, end_addr2;
2681db7b5426Sblueswir1                 int need_subpage = 0;
2682db7b5426Sblueswir1 
2683db7b5426Sblueswir1                 CHECK_SUBPAGE(addr, start_addr, start_addr2, end_addr,
2684db7b5426Sblueswir1                               end_addr2, need_subpage);
2685db7b5426Sblueswir1 
2686f6405247SRichard Henderson                 if (need_subpage) {
2687db7b5426Sblueswir1                     subpage = subpage_init((addr & TARGET_PAGE_MASK),
26888da3ff18Spbrook                                            &p->phys_offset, IO_MEM_UNASSIGNED,
268967c4d23cSpbrook                                            addr & TARGET_PAGE_MASK);
2690db7b5426Sblueswir1                     subpage_register(subpage, start_addr2, end_addr2,
26918da3ff18Spbrook                                      phys_offset, region_offset);
26928da3ff18Spbrook                     p->region_offset = 0;
2693db7b5426Sblueswir1                 }
2694db7b5426Sblueswir1             }
2695db7b5426Sblueswir1         }
26968da3ff18Spbrook         region_offset += TARGET_PAGE_SIZE;
26973b8e6a2dSEdgar E. Iglesias         addr += TARGET_PAGE_SIZE;
26983b8e6a2dSEdgar E. Iglesias     } while (addr != end_addr);
26999d42037bSbellard 
27009d42037bSbellard     /* since each CPU stores ram addresses in its TLB cache, we must
27019d42037bSbellard        reset the modified entries */
27029d42037bSbellard     /* XXX: slow ! */
27039d42037bSbellard     for (env = first_cpu; env != NULL; env = env->next_cpu) {
27049d42037bSbellard         tlb_flush(env, 1);
27059d42037bSbellard     }
270633417e70Sbellard }
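/* Illustrative sketch, not from the QEMU tree: how a board model of this
   era could map RAM and an MMIO page.  'my_io_index' is a hypothetical
   value from cpu_register_io_memory(); in-tree callers normally go
   through the cpu_register_physical_memory() wrapper instead of calling
   the _log variant directly. */
#if 0
static void sketch_map_board_memory(ram_addr_t ram_offset, int my_io_index)
{
    /* 64 MiB of RAM at guest-physical address 0. */
    cpu_register_physical_memory_log(0x00000000, 0x04000000,
                                     ram_offset | IO_MEM_RAM, 0, false);
    /* One page of device registers at 0x10000000. */
    cpu_register_physical_memory_log(0x10000000, TARGET_PAGE_SIZE,
                                     my_io_index, 0, false);
}
#endif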
270733417e70Sbellard 
2708ba863458Sbellard /* XXX: temporary until new memory mapping API */
2709c227f099SAnthony Liguori ram_addr_t cpu_get_physical_page_desc(target_phys_addr_t addr)
2710ba863458Sbellard {
2711ba863458Sbellard     PhysPageDesc *p;
2712ba863458Sbellard 
2713ba863458Sbellard     p = phys_page_find(addr >> TARGET_PAGE_BITS);
2714ba863458Sbellard     if (!p)
2715ba863458Sbellard         return IO_MEM_UNASSIGNED;
2716ba863458Sbellard     return p->phys_offset;
2717ba863458Sbellard }
2718ba863458Sbellard 
2719c227f099SAnthony Liguori void qemu_register_coalesced_mmio(target_phys_addr_t addr, ram_addr_t size)
2720f65ed4c1Saliguori {
2721f65ed4c1Saliguori     if (kvm_enabled())
2722f65ed4c1Saliguori         kvm_coalesce_mmio_region(addr, size);
2723f65ed4c1Saliguori }
2724f65ed4c1Saliguori 
2725c227f099SAnthony Liguori void qemu_unregister_coalesced_mmio(target_phys_addr_t addr, ram_addr_t size)
2726f65ed4c1Saliguori {
2727f65ed4c1Saliguori     if (kvm_enabled())
2728f65ed4c1Saliguori         kvm_uncoalesce_mmio_region(addr, size);
2729f65ed4c1Saliguori }
2730f65ed4c1Saliguori 
273162a2744cSSheng Yang void qemu_flush_coalesced_mmio_buffer(void)
273262a2744cSSheng Yang {
273362a2744cSSheng Yang     if (kvm_enabled())
273462a2744cSSheng Yang         kvm_flush_coalesced_mmio_buffer();
273562a2744cSSheng Yang }
273662a2744cSSheng Yang 
2737c902760fSMarcelo Tosatti #if defined(__linux__) && !defined(TARGET_S390X)
2738c902760fSMarcelo Tosatti 
2739c902760fSMarcelo Tosatti #include <sys/vfs.h>
2740c902760fSMarcelo Tosatti 
2741c902760fSMarcelo Tosatti #define HUGETLBFS_MAGIC       0x958458f6
2742c902760fSMarcelo Tosatti 
2743c902760fSMarcelo Tosatti static long gethugepagesize(const char *path)
2744c902760fSMarcelo Tosatti {
2745c902760fSMarcelo Tosatti     struct statfs fs;
2746c902760fSMarcelo Tosatti     int ret;
2747c902760fSMarcelo Tosatti 
2748c902760fSMarcelo Tosatti     do {
2749c902760fSMarcelo Tosatti         ret = statfs(path, &fs);
2750c902760fSMarcelo Tosatti     } while (ret != 0 && errno == EINTR);
2751c902760fSMarcelo Tosatti 
2752c902760fSMarcelo Tosatti     if (ret != 0) {
27536adc0549SMichael Tokarev         perror(path);
2754c902760fSMarcelo Tosatti         return 0;
2755c902760fSMarcelo Tosatti     }
2756c902760fSMarcelo Tosatti 
2757c902760fSMarcelo Tosatti     if (fs.f_type != HUGETLBFS_MAGIC)
2758c902760fSMarcelo Tosatti         fprintf(stderr, "Warning: path not on HugeTLBFS: %s\n", path);
2759c902760fSMarcelo Tosatti 
2760c902760fSMarcelo Tosatti     return fs.f_bsize;
2761c902760fSMarcelo Tosatti }
2762c902760fSMarcelo Tosatti 
276304b16653SAlex Williamson static void *file_ram_alloc(RAMBlock *block,
276404b16653SAlex Williamson                             ram_addr_t memory,
276504b16653SAlex Williamson                             const char *path)
2766c902760fSMarcelo Tosatti {
2767c902760fSMarcelo Tosatti     char *filename;
2768c902760fSMarcelo Tosatti     void *area;
2769c902760fSMarcelo Tosatti     int fd;
2770c902760fSMarcelo Tosatti #ifdef MAP_POPULATE
2771c902760fSMarcelo Tosatti     int flags;
2772c902760fSMarcelo Tosatti #endif
2773c902760fSMarcelo Tosatti     unsigned long hpagesize;
2774c902760fSMarcelo Tosatti 
2775c902760fSMarcelo Tosatti     hpagesize = gethugepagesize(path);
2776c902760fSMarcelo Tosatti     if (!hpagesize) {
2777c902760fSMarcelo Tosatti         return NULL;
2778c902760fSMarcelo Tosatti     }
2779c902760fSMarcelo Tosatti 
2780c902760fSMarcelo Tosatti     if (memory < hpagesize) {
2781c902760fSMarcelo Tosatti         return NULL;
2782c902760fSMarcelo Tosatti     }
2783c902760fSMarcelo Tosatti 
2784c902760fSMarcelo Tosatti     if (kvm_enabled() && !kvm_has_sync_mmu()) {
2785c902760fSMarcelo Tosatti         fprintf(stderr, "host lacks kvm mmu notifiers, -mem-path unsupported\n");
2786c902760fSMarcelo Tosatti         return NULL;
2787c902760fSMarcelo Tosatti     }
2788c902760fSMarcelo Tosatti 
2789c902760fSMarcelo Tosatti     if (asprintf(&filename, "%s/qemu_back_mem.XXXXXX", path) == -1) {
2790c902760fSMarcelo Tosatti         return NULL;
2791c902760fSMarcelo Tosatti     }
2792c902760fSMarcelo Tosatti 
2793c902760fSMarcelo Tosatti     fd = mkstemp(filename);
2794c902760fSMarcelo Tosatti     if (fd < 0) {
27956adc0549SMichael Tokarev         perror("unable to create backing store for hugepages");
2796c902760fSMarcelo Tosatti         free(filename);
2797c902760fSMarcelo Tosatti         return NULL;
2798c902760fSMarcelo Tosatti     }
2799c902760fSMarcelo Tosatti     unlink(filename);
2800c902760fSMarcelo Tosatti     free(filename);
2801c902760fSMarcelo Tosatti 
2802c902760fSMarcelo Tosatti     memory = (memory+hpagesize-1) & ~(hpagesize-1);
2803c902760fSMarcelo Tosatti 
2804c902760fSMarcelo Tosatti     /*
2805c902760fSMarcelo Tosatti      * ftruncate is not supported by hugetlbfs in older
2806c902760fSMarcelo Tosatti      * hosts, so don't bother bailing out on errors.
2807c902760fSMarcelo Tosatti      * If anything goes wrong with it under other filesystems,
2808c902760fSMarcelo Tosatti      * mmap will fail.
2809c902760fSMarcelo Tosatti      */
2810c902760fSMarcelo Tosatti     if (ftruncate(fd, memory))
2811c902760fSMarcelo Tosatti         perror("ftruncate");
2812c902760fSMarcelo Tosatti 
2813c902760fSMarcelo Tosatti #ifdef MAP_POPULATE
2814c902760fSMarcelo Tosatti     /* NB: MAP_POPULATE won't exhaustively alloc all phys pages in the case
2815c902760fSMarcelo Tosatti      * MAP_PRIVATE is requested.  For mem_prealloc we mmap as MAP_SHARED
2816c902760fSMarcelo Tosatti      * to sidestep this quirk.
2817c902760fSMarcelo Tosatti      */
2818c902760fSMarcelo Tosatti     flags = mem_prealloc ? MAP_POPULATE | MAP_SHARED : MAP_PRIVATE;
2819c902760fSMarcelo Tosatti     area = mmap(0, memory, PROT_READ | PROT_WRITE, flags, fd, 0);
2820c902760fSMarcelo Tosatti #else
2821c902760fSMarcelo Tosatti     area = mmap(0, memory, PROT_READ | PROT_WRITE, MAP_PRIVATE, fd, 0);
2822c902760fSMarcelo Tosatti #endif
2823c902760fSMarcelo Tosatti     if (area == MAP_FAILED) {
2824c902760fSMarcelo Tosatti         perror("file_ram_alloc: can't mmap RAM pages");
2825c902760fSMarcelo Tosatti         close(fd);
2826c902760fSMarcelo Tosatti         return (NULL);
2827c902760fSMarcelo Tosatti     }
282804b16653SAlex Williamson     block->fd = fd;
2829c902760fSMarcelo Tosatti     return area;
2830c902760fSMarcelo Tosatti }
2831c902760fSMarcelo Tosatti #endif
2832c902760fSMarcelo Tosatti 
2833d17b5288SAlex Williamson static ram_addr_t find_ram_offset(ram_addr_t size)
2834d17b5288SAlex Williamson {
283504b16653SAlex Williamson     RAMBlock *block, *next_block;
283609d7ae90SBlue Swirl     ram_addr_t offset = 0, mingap = ULONG_MAX;
283704b16653SAlex Williamson 
283804b16653SAlex Williamson     if (QLIST_EMPTY(&ram_list.blocks))
283904b16653SAlex Williamson         return 0;
284004b16653SAlex Williamson 
284104b16653SAlex Williamson     QLIST_FOREACH(block, &ram_list.blocks, next) {
284204b16653SAlex Williamson         ram_addr_t end, next = ULONG_MAX;
284304b16653SAlex Williamson 
284404b16653SAlex Williamson         end = block->offset + block->length;
284504b16653SAlex Williamson 
284604b16653SAlex Williamson         QLIST_FOREACH(next_block, &ram_list.blocks, next) {
284704b16653SAlex Williamson             if (next_block->offset >= end) {
284804b16653SAlex Williamson                 next = MIN(next, next_block->offset);
284904b16653SAlex Williamson             }
285004b16653SAlex Williamson         }
285104b16653SAlex Williamson         if (next - end >= size && next - end < mingap) {
285204b16653SAlex Williamson             offset = end;
285304b16653SAlex Williamson             mingap = next - end;
285404b16653SAlex Williamson         }
285504b16653SAlex Williamson     }
285604b16653SAlex Williamson     return offset;
285704b16653SAlex Williamson }
285804b16653SAlex Williamson 
285904b16653SAlex Williamson static ram_addr_t last_ram_offset(void)
286004b16653SAlex Williamson {
2861d17b5288SAlex Williamson     RAMBlock *block;
2862d17b5288SAlex Williamson     ram_addr_t last = 0;
2863d17b5288SAlex Williamson 
2864d17b5288SAlex Williamson     QLIST_FOREACH(block, &ram_list.blocks, next)
2865d17b5288SAlex Williamson         last = MAX(last, block->offset + block->length);
2866d17b5288SAlex Williamson 
2867d17b5288SAlex Williamson     return last;
2868d17b5288SAlex Williamson }
2869d17b5288SAlex Williamson 
287084b89d78SCam Macdonell ram_addr_t qemu_ram_alloc_from_ptr(DeviceState *dev, const char *name,
287184b89d78SCam Macdonell                                    ram_addr_t size, void *host)
287284b89d78SCam Macdonell {
287384b89d78SCam Macdonell     RAMBlock *new_block, *block;
287484b89d78SCam Macdonell 
287584b89d78SCam Macdonell     size = TARGET_PAGE_ALIGN(size);
287684b89d78SCam Macdonell     new_block = qemu_mallocz(sizeof(*new_block));
287784b89d78SCam Macdonell 
287884b89d78SCam Macdonell     if (dev && dev->parent_bus && dev->parent_bus->info->get_dev_path) {
287984b89d78SCam Macdonell         char *id = dev->parent_bus->info->get_dev_path(dev);
288084b89d78SCam Macdonell         if (id) {
288184b89d78SCam Macdonell             snprintf(new_block->idstr, sizeof(new_block->idstr), "%s/", id);
288284b89d78SCam Macdonell             qemu_free(id);
288384b89d78SCam Macdonell         }
288484b89d78SCam Macdonell     }
288584b89d78SCam Macdonell     pstrcat(new_block->idstr, sizeof(new_block->idstr), name);
288684b89d78SCam Macdonell 
288784b89d78SCam Macdonell     QLIST_FOREACH(block, &ram_list.blocks, next) {
288884b89d78SCam Macdonell         if (!strcmp(block->idstr, new_block->idstr)) {
288984b89d78SCam Macdonell             fprintf(stderr, "RAMBlock \"%s\" already registered, abort!\n",
289084b89d78SCam Macdonell                     new_block->idstr);
289184b89d78SCam Macdonell             abort();
289284b89d78SCam Macdonell         }
289384b89d78SCam Macdonell     }
289484b89d78SCam Macdonell 
2895432d268cSJun Nakajima     new_block->offset = find_ram_offset(size);
28966977dfe6SYoshiaki Tamura     if (host) {
289784b89d78SCam Macdonell         new_block->host = host;
2898cd19cfa2SHuang Ying         new_block->flags |= RAM_PREALLOC_MASK;
28996977dfe6SYoshiaki Tamura     } else {
2900c902760fSMarcelo Tosatti         if (mem_path) {
2901c902760fSMarcelo Tosatti #if defined (__linux__) && !defined(TARGET_S390X)
290204b16653SAlex Williamson             new_block->host = file_ram_alloc(new_block, size, mem_path);
2903618a568dSMarcelo Tosatti             if (!new_block->host) {
2904618a568dSMarcelo Tosatti                 new_block->host = qemu_vmalloc(size);
2905e78815a5SAndreas Färber                 qemu_madvise(new_block->host, size, QEMU_MADV_MERGEABLE);
2906618a568dSMarcelo Tosatti             }
2907c902760fSMarcelo Tosatti #else
2908c902760fSMarcelo Tosatti             fprintf(stderr, "-mem-path option unsupported\n");
2909c902760fSMarcelo Tosatti             exit(1);
2910c902760fSMarcelo Tosatti #endif
2911c902760fSMarcelo Tosatti         } else {
29126b02494dSAlexander Graf #if defined(TARGET_S390X) && defined(CONFIG_KVM)
2913ff83678aSChristian Borntraeger             /* S390 KVM requires the topmost vma of the RAM to be smaller than
2914ff83678aSChristian Borntraeger                a system-defined value, which is at least 256GB. Larger systems
2915ff83678aSChristian Borntraeger                have larger values. We put the guest between the end of the data
2916ff83678aSChristian Borntraeger                segment (system break) and this value. We use 32GB as a base to
2917ff83678aSChristian Borntraeger                have enough room for the system break to grow. */
2918ff83678aSChristian Borntraeger             new_block->host = mmap((void*)0x800000000, size,
2919c902760fSMarcelo Tosatti                                    PROT_EXEC|PROT_READ|PROT_WRITE,
2920ff83678aSChristian Borntraeger                                    MAP_SHARED | MAP_ANONYMOUS | MAP_FIXED, -1, 0);
2921fb8b2735SAlexander Graf             if (new_block->host == MAP_FAILED) {
2922fb8b2735SAlexander Graf                 fprintf(stderr, "Allocating RAM failed\n");
2923fb8b2735SAlexander Graf                 abort();
2924fb8b2735SAlexander Graf             }
29256b02494dSAlexander Graf #else
2926432d268cSJun Nakajima             if (xen_mapcache_enabled()) {
2927432d268cSJun Nakajima                 xen_ram_alloc(new_block->offset, size);
2928432d268cSJun Nakajima             } else {
292994a6b54fSpbrook                 new_block->host = qemu_vmalloc(size);
2930432d268cSJun Nakajima             }
29316b02494dSAlexander Graf #endif
2932e78815a5SAndreas Färber             qemu_madvise(new_block->host, size, QEMU_MADV_MERGEABLE);
2933c902760fSMarcelo Tosatti         }
29346977dfe6SYoshiaki Tamura     }
293594a6b54fSpbrook     new_block->length = size;
293694a6b54fSpbrook 
2937f471a17eSAlex Williamson     QLIST_INSERT_HEAD(&ram_list.blocks, new_block, next);
293894a6b54fSpbrook 
2939f471a17eSAlex Williamson     ram_list.phys_dirty = qemu_realloc(ram_list.phys_dirty,
294004b16653SAlex Williamson                                        last_ram_offset() >> TARGET_PAGE_BITS);
2941d17b5288SAlex Williamson     memset(ram_list.phys_dirty + (new_block->offset >> TARGET_PAGE_BITS),
294294a6b54fSpbrook            0xff, size >> TARGET_PAGE_BITS);
294394a6b54fSpbrook 
29446f0437e8SJan Kiszka     if (kvm_enabled())
29456f0437e8SJan Kiszka         kvm_setup_guest_memory(new_block->host, size);
29466f0437e8SJan Kiszka 
294794a6b54fSpbrook     return new_block->offset;
294894a6b54fSpbrook }
2949e9a1ab19Sbellard 
29506977dfe6SYoshiaki Tamura ram_addr_t qemu_ram_alloc(DeviceState *dev, const char *name, ram_addr_t size)
29516977dfe6SYoshiaki Tamura {
29526977dfe6SYoshiaki Tamura     return qemu_ram_alloc_from_ptr(dev, name, size, NULL);
29536977dfe6SYoshiaki Tamura }
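/* Illustrative sketch, not from the QEMU tree: the usual device-side
   allocation pattern.  "mydev.vram" is a hypothetical block name; real
   devices pass their DeviceState so the id is qualified by the device
   path. */
#if 0
static void sketch_alloc_vram(void)
{
    ram_addr_t vram_offset = qemu_ram_alloc(NULL, "mydev.vram", 0x100000);
    uint8_t *vram = qemu_get_ram_ptr(vram_offset);

    vram[0] = 0xff;   /* device-local access only; see the comment on
                         qemu_get_ram_ptr() below about DMA */
}
#endif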
29546977dfe6SYoshiaki Tamura 
29551f2e98b6SAlex Williamson void qemu_ram_free_from_ptr(ram_addr_t addr)
29561f2e98b6SAlex Williamson {
29571f2e98b6SAlex Williamson     RAMBlock *block;
29581f2e98b6SAlex Williamson 
29591f2e98b6SAlex Williamson     QLIST_FOREACH(block, &ram_list.blocks, next) {
29601f2e98b6SAlex Williamson         if (addr == block->offset) {
29611f2e98b6SAlex Williamson             QLIST_REMOVE(block, next);
29621f2e98b6SAlex Williamson             qemu_free(block);
29631f2e98b6SAlex Williamson             return;
29641f2e98b6SAlex Williamson         }
29651f2e98b6SAlex Williamson     }
29661f2e98b6SAlex Williamson }
29671f2e98b6SAlex Williamson 
2968c227f099SAnthony Liguori void qemu_ram_free(ram_addr_t addr)
2969e9a1ab19Sbellard {
297004b16653SAlex Williamson     RAMBlock *block;
297104b16653SAlex Williamson 
297204b16653SAlex Williamson     QLIST_FOREACH(block, &ram_list.blocks, next) {
297304b16653SAlex Williamson         if (addr == block->offset) {
297404b16653SAlex Williamson             QLIST_REMOVE(block, next);
2975cd19cfa2SHuang Ying             if (block->flags & RAM_PREALLOC_MASK) {
2976cd19cfa2SHuang Ying                 ;
2977cd19cfa2SHuang Ying             } else if (mem_path) {
297804b16653SAlex Williamson #if defined (__linux__) && !defined(TARGET_S390X)
297904b16653SAlex Williamson                 if (block->fd) {
298004b16653SAlex Williamson                     munmap(block->host, block->length);
298104b16653SAlex Williamson                     close(block->fd);
298204b16653SAlex Williamson                 } else {
298304b16653SAlex Williamson                     qemu_vfree(block->host);
298404b16653SAlex Williamson                 }
2985fd28aa13SJan Kiszka #else
2986fd28aa13SJan Kiszka                 abort();
298704b16653SAlex Williamson #endif
298804b16653SAlex Williamson             } else {
298904b16653SAlex Williamson #if defined(TARGET_S390X) && defined(CONFIG_KVM)
299004b16653SAlex Williamson                 munmap(block->host, block->length);
299104b16653SAlex Williamson #else
2992432d268cSJun Nakajima                 if (xen_mapcache_enabled()) {
2993432d268cSJun Nakajima                     qemu_invalidate_entry(block->host);
2994432d268cSJun Nakajima                 } else {
299504b16653SAlex Williamson                     qemu_vfree(block->host);
2996432d268cSJun Nakajima                 }
299704b16653SAlex Williamson #endif
299804b16653SAlex Williamson             }
299904b16653SAlex Williamson             qemu_free(block);
300004b16653SAlex Williamson             return;
300104b16653SAlex Williamson         }
300204b16653SAlex Williamson     }
300304b16653SAlex Williamson 
3004e9a1ab19Sbellard }
3005e9a1ab19Sbellard 
3006cd19cfa2SHuang Ying #ifndef _WIN32
3007cd19cfa2SHuang Ying void qemu_ram_remap(ram_addr_t addr, ram_addr_t length)
3008cd19cfa2SHuang Ying {
3009cd19cfa2SHuang Ying     RAMBlock *block;
3010cd19cfa2SHuang Ying     ram_addr_t offset;
3011cd19cfa2SHuang Ying     int flags;
3012cd19cfa2SHuang Ying     void *area, *vaddr;
3013cd19cfa2SHuang Ying 
3014cd19cfa2SHuang Ying     QLIST_FOREACH(block, &ram_list.blocks, next) {
3015cd19cfa2SHuang Ying         offset = addr - block->offset;
3016cd19cfa2SHuang Ying         if (offset < block->length) {
3017cd19cfa2SHuang Ying             vaddr = block->host + offset;
3018cd19cfa2SHuang Ying             if (block->flags & RAM_PREALLOC_MASK) {
3019cd19cfa2SHuang Ying                 ;
3020cd19cfa2SHuang Ying             } else {
3021cd19cfa2SHuang Ying                 flags = MAP_FIXED;
3022cd19cfa2SHuang Ying                 munmap(vaddr, length);
3023cd19cfa2SHuang Ying                 if (mem_path) {
3024cd19cfa2SHuang Ying #if defined(__linux__) && !defined(TARGET_S390X)
3025cd19cfa2SHuang Ying                     if (block->fd) {
3026cd19cfa2SHuang Ying #ifdef MAP_POPULATE
3027cd19cfa2SHuang Ying                         flags |= mem_prealloc ? MAP_POPULATE | MAP_SHARED :
3028cd19cfa2SHuang Ying                             MAP_PRIVATE;
3029cd19cfa2SHuang Ying #else
3030cd19cfa2SHuang Ying                         flags |= MAP_PRIVATE;
3031cd19cfa2SHuang Ying #endif
3032cd19cfa2SHuang Ying                         area = mmap(vaddr, length, PROT_READ | PROT_WRITE,
3033cd19cfa2SHuang Ying                                     flags, block->fd, offset);
3034cd19cfa2SHuang Ying                     } else {
3035cd19cfa2SHuang Ying                         flags |= MAP_PRIVATE | MAP_ANONYMOUS;
3036cd19cfa2SHuang Ying                         area = mmap(vaddr, length, PROT_READ | PROT_WRITE,
3037cd19cfa2SHuang Ying                                     flags, -1, 0);
3038cd19cfa2SHuang Ying                     }
3039fd28aa13SJan Kiszka #else
3040fd28aa13SJan Kiszka                     abort();
3041cd19cfa2SHuang Ying #endif
3042cd19cfa2SHuang Ying                 } else {
3043cd19cfa2SHuang Ying #if defined(TARGET_S390X) && defined(CONFIG_KVM)
3044cd19cfa2SHuang Ying                     flags |= MAP_SHARED | MAP_ANONYMOUS;
3045cd19cfa2SHuang Ying                     area = mmap(vaddr, length, PROT_EXEC|PROT_READ|PROT_WRITE,
3046cd19cfa2SHuang Ying                                 flags, -1, 0);
3047cd19cfa2SHuang Ying #else
3048cd19cfa2SHuang Ying                     flags |= MAP_PRIVATE | MAP_ANONYMOUS;
3049cd19cfa2SHuang Ying                     area = mmap(vaddr, length, PROT_READ | PROT_WRITE,
3050cd19cfa2SHuang Ying                                 flags, -1, 0);
3051cd19cfa2SHuang Ying #endif
3052cd19cfa2SHuang Ying                 }
3053cd19cfa2SHuang Ying                 if (area != vaddr) {
3054cd19cfa2SHuang Ying                     fprintf(stderr, "Could not remap addr: %lx@%lx\n",
3055cd19cfa2SHuang Ying                             length, addr);
3056cd19cfa2SHuang Ying                     exit(1);
3057cd19cfa2SHuang Ying                 }
3058cd19cfa2SHuang Ying                 qemu_madvise(vaddr, length, QEMU_MADV_MERGEABLE);
3059cd19cfa2SHuang Ying             }
3060cd19cfa2SHuang Ying             return;
3061cd19cfa2SHuang Ying         }
3062cd19cfa2SHuang Ying     }
3063cd19cfa2SHuang Ying }
3064cd19cfa2SHuang Ying #endif /* !_WIN32 */
3065cd19cfa2SHuang Ying 
3066dc828ca1Spbrook /* Return a host pointer to ram allocated with qemu_ram_alloc.
30675579c7f3Spbrook    With the exception of the softmmu code in this file, this should
30685579c7f3Spbrook    only be used for local memory (e.g. video ram) that the device owns,
30695579c7f3Spbrook    and knows it isn't going to access beyond the end of the block.
30705579c7f3Spbrook 
30715579c7f3Spbrook    It should not be used for general purpose DMA.
30725579c7f3Spbrook    Use cpu_physical_memory_map/cpu_physical_memory_rw instead.
30735579c7f3Spbrook  */
3074c227f099SAnthony Liguori void *qemu_get_ram_ptr(ram_addr_t addr)
3075dc828ca1Spbrook {
307694a6b54fSpbrook     RAMBlock *block;
307794a6b54fSpbrook 
3078f471a17eSAlex Williamson     QLIST_FOREACH(block, &ram_list.blocks, next) {
3079f471a17eSAlex Williamson         if (addr - block->offset < block->length) {
30807d82af38SVincent Palatin             /* Move this entry to the start of the list.  */
30817d82af38SVincent Palatin             if (block != QLIST_FIRST(&ram_list.blocks)) {
3082f471a17eSAlex Williamson                 QLIST_REMOVE(block, next);
3083f471a17eSAlex Williamson                 QLIST_INSERT_HEAD(&ram_list.blocks, block, next);
30847d82af38SVincent Palatin             }
3085432d268cSJun Nakajima             if (xen_mapcache_enabled()) {
3086432d268cSJun Nakajima                 /* We need to check if the requested address is in the RAM
3087432d268cSJun Nakajima                  * because we don't want to map the entire memory in QEMU.
3088432d268cSJun Nakajima                  */
3089432d268cSJun Nakajima                 if (block->offset == 0) {
3090432d268cSJun Nakajima                     return qemu_map_cache(addr, 0, 1);
3091432d268cSJun Nakajima                 } else if (block->host == NULL) {
30926506e4f9SStefano Stabellini                     block->host = qemu_map_cache(block->offset, block->length, 1);
3093432d268cSJun Nakajima                 }
3094432d268cSJun Nakajima             }
3095f471a17eSAlex Williamson             return block->host + (addr - block->offset);
309694a6b54fSpbrook         }
3097f471a17eSAlex Williamson     }
3098f471a17eSAlex Williamson 
309994a6b54fSpbrook     fprintf(stderr, "Bad ram offset %" PRIx64 "\n", (uint64_t)addr);
310094a6b54fSpbrook     abort();
3101f471a17eSAlex Williamson 
3102f471a17eSAlex Williamson     return NULL;
3103dc828ca1Spbrook }
3104dc828ca1Spbrook 
3105b2e0a138SMichael S. Tsirkin /* Return a host pointer to ram allocated with qemu_ram_alloc.
3106b2e0a138SMichael S. Tsirkin  * Same as qemu_get_ram_ptr but avoid reordering ramblocks.
3107b2e0a138SMichael S. Tsirkin  */
3108b2e0a138SMichael S. Tsirkin void *qemu_safe_ram_ptr(ram_addr_t addr)
3109b2e0a138SMichael S. Tsirkin {
3110b2e0a138SMichael S. Tsirkin     RAMBlock *block;
3111b2e0a138SMichael S. Tsirkin 
3112b2e0a138SMichael S. Tsirkin     QLIST_FOREACH(block, &ram_list.blocks, next) {
3113b2e0a138SMichael S. Tsirkin         if (addr - block->offset < block->length) {
3114432d268cSJun Nakajima             if (xen_mapcache_enabled()) {
3115432d268cSJun Nakajima                 /* We need to check if the requested address is in the RAM
3116432d268cSJun Nakajima                  * because we don't want to map the entire memory in QEMU.
3117432d268cSJun Nakajima                  */
3118432d268cSJun Nakajima                 if (block->offset == 0) {
3119432d268cSJun Nakajima                     return qemu_map_cache(addr, 0, 1);
3120432d268cSJun Nakajima                 } else if (block->host == NULL) {
31216506e4f9SStefano Stabellini                     block->host = qemu_map_cache(block->offset, block->length, 1);
3122432d268cSJun Nakajima                 }
3123432d268cSJun Nakajima             }
3124b2e0a138SMichael S. Tsirkin             return block->host + (addr - block->offset);
3125b2e0a138SMichael S. Tsirkin         }
3126b2e0a138SMichael S. Tsirkin     }
3127b2e0a138SMichael S. Tsirkin 
3128b2e0a138SMichael S. Tsirkin     fprintf(stderr, "Bad ram offset %" PRIx64 "\n", (uint64_t)addr);
3129b2e0a138SMichael S. Tsirkin     abort();
3130b2e0a138SMichael S. Tsirkin 
3131b2e0a138SMichael S. Tsirkin     return NULL;
3132b2e0a138SMichael S. Tsirkin }
3133b2e0a138SMichael S. Tsirkin 
3134050a0ddfSAnthony PERARD void qemu_put_ram_ptr(void *addr)
3135050a0ddfSAnthony PERARD {
3136050a0ddfSAnthony PERARD     trace_qemu_put_ram_ptr(addr);
3137050a0ddfSAnthony PERARD 
3138050a0ddfSAnthony PERARD     if (xen_mapcache_enabled()) {
31396506e4f9SStefano Stabellini         qemu_invalidate_entry(addr);
3140050a0ddfSAnthony PERARD     }
3141050a0ddfSAnthony PERARD }
3142050a0ddfSAnthony PERARD 
3143e890261fSMarcelo Tosatti int qemu_ram_addr_from_host(void *ptr, ram_addr_t *ram_addr)
31445579c7f3Spbrook {
314594a6b54fSpbrook     RAMBlock *block;
314694a6b54fSpbrook     uint8_t *host = ptr;
314794a6b54fSpbrook 
3148f471a17eSAlex Williamson     QLIST_FOREACH(block, &ram_list.blocks, next) {
3149432d268cSJun Nakajima         /* This can happen when the block is not mapped. */
3150432d268cSJun Nakajima         if (block->host == NULL) {
3151432d268cSJun Nakajima             continue;
3152432d268cSJun Nakajima         }
3153f471a17eSAlex Williamson         if (host - block->host < block->length) {
3154e890261fSMarcelo Tosatti             *ram_addr = block->offset + (host - block->host);
3155e890261fSMarcelo Tosatti             return 0;
315694a6b54fSpbrook         }
3157f471a17eSAlex Williamson     }
3158432d268cSJun Nakajima 
3159432d268cSJun Nakajima     if (xen_mapcache_enabled()) {
3160432d268cSJun Nakajima         *ram_addr = qemu_ram_addr_from_mapcache(ptr);
3161432d268cSJun Nakajima         return 0;
3162432d268cSJun Nakajima     }
3163432d268cSJun Nakajima 
3164e890261fSMarcelo Tosatti     return -1;
3165e890261fSMarcelo Tosatti }
3166f471a17eSAlex Williamson 
3167e890261fSMarcelo Tosatti /* Some of the softmmu routines need to translate from a host pointer
3168e890261fSMarcelo Tosatti    (typically a TLB entry) back to a ram offset.  */
3169e890261fSMarcelo Tosatti ram_addr_t qemu_ram_addr_from_host_nofail(void *ptr)
3170e890261fSMarcelo Tosatti {
3171e890261fSMarcelo Tosatti     ram_addr_t ram_addr;
3172e890261fSMarcelo Tosatti 
3173e890261fSMarcelo Tosatti     if (qemu_ram_addr_from_host(ptr, &ram_addr)) {
317494a6b54fSpbrook         fprintf(stderr, "Bad ram pointer %p\n", ptr);
317594a6b54fSpbrook         abort();
3176e890261fSMarcelo Tosatti     }
3177e890261fSMarcelo Tosatti     return ram_addr;
31785579c7f3Spbrook }
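/* Illustrative sketch, not from the QEMU tree: round-tripping a host
   pointer back to its ram_addr_t, the inverse of qemu_get_ram_ptr().
   This is exactly the mapping that tlb_update_dirty() above relies on. */
#if 0
static ram_addr_t sketch_round_trip(ram_addr_t addr)
{
    void *host = qemu_get_ram_ptr(addr);
    return qemu_ram_addr_from_host_nofail(host);   /* == addr */
}
#endif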
31795579c7f3Spbrook 
3180c227f099SAnthony Liguori static uint32_t unassigned_mem_readb(void *opaque, target_phys_addr_t addr)
318133417e70Sbellard {
318267d3b957Spbrook #ifdef DEBUG_UNASSIGNED
3183ab3d1727Sblueswir1     printf("Unassigned mem read " TARGET_FMT_plx "\n", addr);
318467d3b957Spbrook #endif
31855b450407SRichard Henderson #if defined(TARGET_ALPHA) || defined(TARGET_SPARC) || defined(TARGET_MICROBLAZE)
3186e18231a3Sblueswir1     do_unassigned_access(addr, 0, 0, 0, 1);
3187e18231a3Sblueswir1 #endif
3188e18231a3Sblueswir1     return 0;
3189e18231a3Sblueswir1 }
3190e18231a3Sblueswir1 
3191c227f099SAnthony Liguori static uint32_t unassigned_mem_readw(void *opaque, target_phys_addr_t addr)
3192e18231a3Sblueswir1 {
3193e18231a3Sblueswir1 #ifdef DEBUG_UNASSIGNED
3194e18231a3Sblueswir1     printf("Unassigned mem read " TARGET_FMT_plx "\n", addr);
3195e18231a3Sblueswir1 #endif
31965b450407SRichard Henderson #if defined(TARGET_ALPHA) || defined(TARGET_SPARC) || defined(TARGET_MICROBLAZE)
3197e18231a3Sblueswir1     do_unassigned_access(addr, 0, 0, 0, 2);
3198e18231a3Sblueswir1 #endif
3199e18231a3Sblueswir1     return 0;
3200e18231a3Sblueswir1 }
3201e18231a3Sblueswir1 
3202c227f099SAnthony Liguori static uint32_t unassigned_mem_readl(void *opaque, target_phys_addr_t addr)
3203e18231a3Sblueswir1 {
3204e18231a3Sblueswir1 #ifdef DEBUG_UNASSIGNED
3205e18231a3Sblueswir1     printf("Unassigned mem read " TARGET_FMT_plx "\n", addr);
3206e18231a3Sblueswir1 #endif
32075b450407SRichard Henderson #if defined(TARGET_ALPHA) || defined(TARGET_SPARC) || defined(TARGET_MICROBLAZE)
3208e18231a3Sblueswir1     do_unassigned_access(addr, 0, 0, 0, 4);
3209b4f0a316Sblueswir1 #endif
321033417e70Sbellard     return 0;
321133417e70Sbellard }
321233417e70Sbellard 
3213c227f099SAnthony Liguori static void unassigned_mem_writeb(void *opaque, target_phys_addr_t addr, uint32_t val)
321433417e70Sbellard {
321567d3b957Spbrook #ifdef DEBUG_UNASSIGNED
3216ab3d1727Sblueswir1     printf("Unassigned mem write " TARGET_FMT_plx " = 0x%x\n", addr, val);
321767d3b957Spbrook #endif
32185b450407SRichard Henderson #if defined(TARGET_ALPHA) || defined(TARGET_SPARC) || defined(TARGET_MICROBLAZE)
3219e18231a3Sblueswir1     do_unassigned_access(addr, 1, 0, 0, 1);
3220e18231a3Sblueswir1 #endif
3221e18231a3Sblueswir1 }
3222e18231a3Sblueswir1 
3223c227f099SAnthony Liguori static void unassigned_mem_writew(void *opaque, target_phys_addr_t addr, uint32_t val)
3224e18231a3Sblueswir1 {
3225e18231a3Sblueswir1 #ifdef DEBUG_UNASSIGNED
3226e18231a3Sblueswir1     printf("Unassigned mem write " TARGET_FMT_plx " = 0x%x\n", addr, val);
3227e18231a3Sblueswir1 #endif
32285b450407SRichard Henderson #if defined(TARGET_ALPHA) || defined(TARGET_SPARC) || defined(TARGET_MICROBLAZE)
3229e18231a3Sblueswir1     do_unassigned_access(addr, 1, 0, 0, 2);
3230e18231a3Sblueswir1 #endif
3231e18231a3Sblueswir1 }
3232e18231a3Sblueswir1 
3233c227f099SAnthony Liguori static void unassigned_mem_writel(void *opaque, target_phys_addr_t addr, uint32_t val)
3234e18231a3Sblueswir1 {
3235e18231a3Sblueswir1 #ifdef DEBUG_UNASSIGNED
3236e18231a3Sblueswir1     printf("Unassigned mem write " TARGET_FMT_plx " = 0x%x\n", addr, val);
3237e18231a3Sblueswir1 #endif
32385b450407SRichard Henderson #if defined(TARGET_ALPHA) || defined(TARGET_SPARC) || defined(TARGET_MICROBLAZE)
3239e18231a3Sblueswir1     do_unassigned_access(addr, 1, 0, 0, 4);
3240b4f0a316Sblueswir1 #endif
324133417e70Sbellard }
324233417e70Sbellard 
3243d60efc6bSBlue Swirl static CPUReadMemoryFunc * const unassigned_mem_read[3] = {
324433417e70Sbellard     unassigned_mem_readb,
3245e18231a3Sblueswir1     unassigned_mem_readw,
3246e18231a3Sblueswir1     unassigned_mem_readl,
324733417e70Sbellard };
324833417e70Sbellard 
3249d60efc6bSBlue Swirl static CPUWriteMemoryFunc * const unassigned_mem_write[3] = {
325033417e70Sbellard     unassigned_mem_writeb,
3251e18231a3Sblueswir1     unassigned_mem_writew,
3252e18231a3Sblueswir1     unassigned_mem_writel,
325333417e70Sbellard };
325433417e70Sbellard 
3255c227f099SAnthony Liguori static void notdirty_mem_writeb(void *opaque, target_phys_addr_t ram_addr,
32560f459d16Spbrook                                 uint32_t val)
32571ccde1cbSbellard {
32583a7d929eSbellard     int dirty_flags;
3259f7c11b53SYoshiaki Tamura     dirty_flags = cpu_physical_memory_get_dirty_flags(ram_addr);
32603a7d929eSbellard     if (!(dirty_flags & CODE_DIRTY_FLAG)) {
32613a7d929eSbellard #if !defined(CONFIG_USER_ONLY)
32623a7d929eSbellard         tb_invalidate_phys_page_fast(ram_addr, 1);
3263f7c11b53SYoshiaki Tamura         dirty_flags = cpu_physical_memory_get_dirty_flags(ram_addr);
32643a7d929eSbellard #endif
32653a7d929eSbellard     }
32665579c7f3Spbrook     stb_p(qemu_get_ram_ptr(ram_addr), val);
3267f23db169Sbellard     dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
3268f7c11b53SYoshiaki Tamura     cpu_physical_memory_set_dirty_flags(ram_addr, dirty_flags);
3269f23db169Sbellard     /* we remove the notdirty callback only if the code has been
3270f23db169Sbellard        flushed */
3271f23db169Sbellard     if (dirty_flags == 0xff)
32722e70f6efSpbrook         tlb_set_dirty(cpu_single_env, cpu_single_env->mem_io_vaddr);
32731ccde1cbSbellard }
32741ccde1cbSbellard 
3275c227f099SAnthony Liguori static void notdirty_mem_writew(void *opaque, target_phys_addr_t ram_addr,
32760f459d16Spbrook                                 uint32_t val)
32771ccde1cbSbellard {
32783a7d929eSbellard     int dirty_flags;
3279f7c11b53SYoshiaki Tamura     dirty_flags = cpu_physical_memory_get_dirty_flags(ram_addr);
32803a7d929eSbellard     if (!(dirty_flags & CODE_DIRTY_FLAG)) {
32813a7d929eSbellard #if !defined(CONFIG_USER_ONLY)
32823a7d929eSbellard         tb_invalidate_phys_page_fast(ram_addr, 2);
3283f7c11b53SYoshiaki Tamura         dirty_flags = cpu_physical_memory_get_dirty_flags(ram_addr);
32843a7d929eSbellard #endif
32853a7d929eSbellard     }
32865579c7f3Spbrook     stw_p(qemu_get_ram_ptr(ram_addr), val);
3287f23db169Sbellard     dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
3288f7c11b53SYoshiaki Tamura     cpu_physical_memory_set_dirty_flags(ram_addr, dirty_flags);
3289f23db169Sbellard     /* we remove the notdirty callback only if the code has been
3290f23db169Sbellard        flushed */
3291f23db169Sbellard     if (dirty_flags == 0xff)
32922e70f6efSpbrook         tlb_set_dirty(cpu_single_env, cpu_single_env->mem_io_vaddr);
32931ccde1cbSbellard }
32941ccde1cbSbellard 
3295c227f099SAnthony Liguori static void notdirty_mem_writel(void *opaque, target_phys_addr_t ram_addr,
32960f459d16Spbrook                                 uint32_t val)
32971ccde1cbSbellard {
32983a7d929eSbellard     int dirty_flags;
3299f7c11b53SYoshiaki Tamura     dirty_flags = cpu_physical_memory_get_dirty_flags(ram_addr);
33003a7d929eSbellard     if (!(dirty_flags & CODE_DIRTY_FLAG)) {
33013a7d929eSbellard #if !defined(CONFIG_USER_ONLY)
33023a7d929eSbellard         tb_invalidate_phys_page_fast(ram_addr, 4);
3303f7c11b53SYoshiaki Tamura         dirty_flags = cpu_physical_memory_get_dirty_flags(ram_addr);
33043a7d929eSbellard #endif
33053a7d929eSbellard     }
33065579c7f3Spbrook     stl_p(qemu_get_ram_ptr(ram_addr), val);
3307f23db169Sbellard     dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
3308f7c11b53SYoshiaki Tamura     cpu_physical_memory_set_dirty_flags(ram_addr, dirty_flags);
3309f23db169Sbellard     /* we remove the notdirty callback only if the code has been
3310f23db169Sbellard        flushed */
3311f23db169Sbellard     if (dirty_flags == 0xff)
33122e70f6efSpbrook         tlb_set_dirty(cpu_single_env, cpu_single_env->mem_io_vaddr);
33131ccde1cbSbellard }
33141ccde1cbSbellard 
3315d60efc6bSBlue Swirl static CPUReadMemoryFunc * const error_mem_read[3] = {
33163a7d929eSbellard     NULL, /* never used */
33173a7d929eSbellard     NULL, /* never used */
33183a7d929eSbellard     NULL, /* never used */
33193a7d929eSbellard };
33203a7d929eSbellard 
3321d60efc6bSBlue Swirl static CPUWriteMemoryFunc * const notdirty_mem_write[3] = {
33221ccde1cbSbellard     notdirty_mem_writeb,
33231ccde1cbSbellard     notdirty_mem_writew,
33241ccde1cbSbellard     notdirty_mem_writel,
33251ccde1cbSbellard };
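
/*
 * Dirty-byte bookkeeping, sketched with assumed values (the per-page
 * dirty byte holds CODE_DIRTY_FLAG plus the VGA/migration bits):
 *
 *     flags = cpu_physical_memory_get_dirty_flags(ram_addr); // e.g. 0x00
 *     // CODE_DIRTY_FLAG clear: page may contain translated code
 *     tb_invalidate_phys_page_fast(ram_addr, n);  // flush TBs, re-read flags
 *     flags |= 0xff & ~CODE_DIRTY_FLAG;           // set everything but code
 *     if (flags == 0xff)                          // no TBs left on the page
 *         tlb_set_dirty(env, vaddr);              // drop the slow write path
 */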
33261ccde1cbSbellard 
33270f459d16Spbrook /* Generate a debug exception if a watchpoint has been hit.  */
3328b4051334Saliguori static void check_watchpoint(int offset, int len_mask, int flags)
33290f459d16Spbrook {
33300f459d16Spbrook     CPUState *env = cpu_single_env;
333106d55cc1Saliguori     target_ulong pc, cs_base;
333206d55cc1Saliguori     TranslationBlock *tb;
33330f459d16Spbrook     target_ulong vaddr;
3334a1d1bb31Saliguori     CPUWatchpoint *wp;
333506d55cc1Saliguori     int cpu_flags;
33360f459d16Spbrook 
333706d55cc1Saliguori     if (env->watchpoint_hit) {
333806d55cc1Saliguori         /* We re-entered the check after replacing the TB. Now raise
333906d55cc1Saliguori          * the debug interrupt so that it will trigger after the
334006d55cc1Saliguori          * current instruction. */
334106d55cc1Saliguori         cpu_interrupt(env, CPU_INTERRUPT_DEBUG);
334206d55cc1Saliguori         return;
334306d55cc1Saliguori     }
33442e70f6efSpbrook     vaddr = (env->mem_io_vaddr & TARGET_PAGE_MASK) + offset;
334572cf2d4fSBlue Swirl     QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
3346b4051334Saliguori         if ((vaddr == (wp->vaddr & len_mask) ||
3347b4051334Saliguori              (vaddr & wp->len_mask) == wp->vaddr) && (wp->flags & flags)) {
33486e140f28Saliguori             wp->flags |= BP_WATCHPOINT_HIT;
33496e140f28Saliguori             if (!env->watchpoint_hit) {
3350a1d1bb31Saliguori                 env->watchpoint_hit = wp;
335106d55cc1Saliguori                 tb = tb_find_pc(env->mem_io_pc);
335206d55cc1Saliguori                 if (!tb) {
33536e140f28Saliguori                     cpu_abort(env, "check_watchpoint: could not find TB for "
33546e140f28Saliguori                               "pc=%p", (void *)env->mem_io_pc);
335506d55cc1Saliguori                 }
3356618ba8e6SStefan Weil                 cpu_restore_state(tb, env, env->mem_io_pc);
335706d55cc1Saliguori                 tb_phys_invalidate(tb, -1);
335806d55cc1Saliguori                 if (wp->flags & BP_STOP_BEFORE_ACCESS) {
335906d55cc1Saliguori                     env->exception_index = EXCP_DEBUG;
336006d55cc1Saliguori                 } else {
336106d55cc1Saliguori                     cpu_get_tb_cpu_state(env, &pc, &cs_base, &cpu_flags);
336206d55cc1Saliguori                     tb_gen_code(env, pc, cs_base, cpu_flags, 1);
336306d55cc1Saliguori                 }
336406d55cc1Saliguori                 cpu_resume_from_signal(env, NULL);
33650f459d16Spbrook             }
33666e140f28Saliguori         } else {
33676e140f28Saliguori             wp->flags &= ~BP_WATCHPOINT_HIT;
33686e140f28Saliguori         }
33690f459d16Spbrook     }
33700f459d16Spbrook }
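
/*
 * Worked example of the overlap test above (values illustrative).  A
 * 4-byte watchpoint at guest vaddr 0x1000 has wp->vaddr = 0x1000 and
 * wp->len_mask = ~0x3; a 2-byte access at 0x1002 arrives with the
 * len_mask argument ~0x1:
 *
 *     vaddr == (wp->vaddr & ~0x1)         -> 0x1002 == 0x1000 : false
 *     (vaddr & wp->len_mask) == wp->vaddr -> 0x1000 == 0x1000 : true
 *
 * so the access hits the watchpoint and the debug machinery above runs.
 */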
33710f459d16Spbrook 
33726658ffb8Spbrook /* Watchpoint access routines.  Watchpoints are inserted using TLB tricks,
33736658ffb8Spbrook    so these check for a hit then pass through to the normal out-of-line
33746658ffb8Spbrook    phys routines.  */
3375c227f099SAnthony Liguori static uint32_t watch_mem_readb(void *opaque, target_phys_addr_t addr)
33766658ffb8Spbrook {
3377b4051334Saliguori     check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x0, BP_MEM_READ);
33786658ffb8Spbrook     return ldub_phys(addr);
33796658ffb8Spbrook }
33806658ffb8Spbrook 
3381c227f099SAnthony Liguori static uint32_t watch_mem_readw(void *opaque, target_phys_addr_t addr)
33826658ffb8Spbrook {
3383b4051334Saliguori     check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x1, BP_MEM_READ);
33846658ffb8Spbrook     return lduw_phys(addr);
33856658ffb8Spbrook }
33866658ffb8Spbrook 
3387c227f099SAnthony Liguori static uint32_t watch_mem_readl(void *opaque, target_phys_addr_t addr)
33886658ffb8Spbrook {
3389b4051334Saliguori     check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x3, BP_MEM_READ);
33906658ffb8Spbrook     return ldl_phys(addr);
33916658ffb8Spbrook }
33926658ffb8Spbrook 
3393c227f099SAnthony Liguori static void watch_mem_writeb(void *opaque, target_phys_addr_t addr,
33946658ffb8Spbrook                              uint32_t val)
33956658ffb8Spbrook {
3396b4051334Saliguori     check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x0, BP_MEM_WRITE);
33976658ffb8Spbrook     stb_phys(addr, val);
33986658ffb8Spbrook }
33996658ffb8Spbrook 
3400c227f099SAnthony Liguori static void watch_mem_writew(void *opaque, target_phys_addr_t addr,
34016658ffb8Spbrook                              uint32_t val)
34026658ffb8Spbrook {
3403b4051334Saliguori     check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x1, BP_MEM_WRITE);
34046658ffb8Spbrook     stw_phys(addr, val);
34056658ffb8Spbrook }
34066658ffb8Spbrook 
3407c227f099SAnthony Liguori static void watch_mem_writel(void *opaque, target_phys_addr_t addr,
34086658ffb8Spbrook                              uint32_t val)
34096658ffb8Spbrook {
3410b4051334Saliguori     check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x3, BP_MEM_WRITE);
34116658ffb8Spbrook     stl_phys(addr, val);
34126658ffb8Spbrook }
34136658ffb8Spbrook 
3414d60efc6bSBlue Swirl static CPUReadMemoryFunc * const watch_mem_read[3] = {
34156658ffb8Spbrook     watch_mem_readb,
34166658ffb8Spbrook     watch_mem_readw,
34176658ffb8Spbrook     watch_mem_readl,
34186658ffb8Spbrook };
34196658ffb8Spbrook 
3420d60efc6bSBlue Swirl static CPUWriteMemoryFunc * const watch_mem_write[3] = {
34216658ffb8Spbrook     watch_mem_writeb,
34226658ffb8Spbrook     watch_mem_writew,
34236658ffb8Spbrook     watch_mem_writel,
34246658ffb8Spbrook };
34256658ffb8Spbrook 
3426f6405247SRichard Henderson static inline uint32_t subpage_readlen (subpage_t *mmio,
3427f6405247SRichard Henderson                                         target_phys_addr_t addr,
3428db7b5426Sblueswir1                                         unsigned int len)
3429db7b5426Sblueswir1 {
3430f6405247SRichard Henderson     unsigned int idx = SUBPAGE_IDX(addr);
3431db7b5426Sblueswir1 #if defined(DEBUG_SUBPAGE)
3432db7b5426Sblueswir1     printf("%s: subpage %p len %d addr " TARGET_FMT_plx " idx %d\n", __func__,
3433db7b5426Sblueswir1            mmio, len, addr, idx);
3434db7b5426Sblueswir1 #endif
3435db7b5426Sblueswir1 
3436f6405247SRichard Henderson     addr += mmio->region_offset[idx];
3437f6405247SRichard Henderson     idx = mmio->sub_io_index[idx];
3438f6405247SRichard Henderson     return io_mem_read[idx][len](io_mem_opaque[idx], addr);
3439db7b5426Sblueswir1 }
3440db7b5426Sblueswir1 
3441c227f099SAnthony Liguori static inline void subpage_writelen (subpage_t *mmio, target_phys_addr_t addr,
3442db7b5426Sblueswir1                                      uint32_t value, unsigned int len)
3443db7b5426Sblueswir1 {
3444f6405247SRichard Henderson     unsigned int idx = SUBPAGE_IDX(addr);
3445db7b5426Sblueswir1 #if defined(DEBUG_SUBPAGE)
3446f6405247SRichard Henderson     printf("%s: subpage %p len %d addr " TARGET_FMT_plx " idx %d value %08x\n",
3447f6405247SRichard Henderson            __func__, mmio, len, addr, idx, value);
3448db7b5426Sblueswir1 #endif
3449f6405247SRichard Henderson 
3450f6405247SRichard Henderson     addr += mmio->region_offset[idx];
3451f6405247SRichard Henderson     idx = mmio->sub_io_index[idx];
3452f6405247SRichard Henderson     io_mem_write[idx][len](io_mem_opaque[idx], addr, value);
3453db7b5426Sblueswir1 }
3454db7b5426Sblueswir1 
3455c227f099SAnthony Liguori static uint32_t subpage_readb (void *opaque, target_phys_addr_t addr)
3456db7b5426Sblueswir1 {
3457db7b5426Sblueswir1     return subpage_readlen(opaque, addr, 0);
3458db7b5426Sblueswir1 }
3459db7b5426Sblueswir1 
3460c227f099SAnthony Liguori static void subpage_writeb (void *opaque, target_phys_addr_t addr,
3461db7b5426Sblueswir1                             uint32_t value)
3462db7b5426Sblueswir1 {
3463db7b5426Sblueswir1     subpage_writelen(opaque, addr, value, 0);
3464db7b5426Sblueswir1 }
3465db7b5426Sblueswir1 
3466c227f099SAnthony Liguori static uint32_t subpage_readw (void *opaque, target_phys_addr_t addr)
3467db7b5426Sblueswir1 {
3468db7b5426Sblueswir1     return subpage_readlen(opaque, addr, 1);
3469db7b5426Sblueswir1 }
3470db7b5426Sblueswir1 
3471c227f099SAnthony Liguori static void subpage_writew (void *opaque, target_phys_addr_t addr,
3472db7b5426Sblueswir1                             uint32_t value)
3473db7b5426Sblueswir1 {
3474db7b5426Sblueswir1     subpage_writelen(opaque, addr, value, 1);
3475db7b5426Sblueswir1 }
3476db7b5426Sblueswir1 
3477c227f099SAnthony Liguori static uint32_t subpage_readl (void *opaque, target_phys_addr_t addr)
3478db7b5426Sblueswir1 {
3479db7b5426Sblueswir1     return subpage_readlen(opaque, addr, 2);
3480db7b5426Sblueswir1 }
3481db7b5426Sblueswir1 
3482f6405247SRichard Henderson static void subpage_writel (void *opaque, target_phys_addr_t addr,
3483f6405247SRichard Henderson                             uint32_t value)
3484db7b5426Sblueswir1 {
3485db7b5426Sblueswir1     subpage_writelen(opaque, addr, value, 2);
3486db7b5426Sblueswir1 }
3487db7b5426Sblueswir1 
3488d60efc6bSBlue Swirl static CPUReadMemoryFunc * const subpage_read[] = {
3489db7b5426Sblueswir1     &subpage_readb,
3490db7b5426Sblueswir1     &subpage_readw,
3491db7b5426Sblueswir1     &subpage_readl,
3492db7b5426Sblueswir1 };
3493db7b5426Sblueswir1 
3494d60efc6bSBlue Swirl static CPUWriteMemoryFunc * const subpage_write[] = {
3495db7b5426Sblueswir1     &subpage_writeb,
3496db7b5426Sblueswir1     &subpage_writew,
3497db7b5426Sblueswir1     &subpage_writel,
3498db7b5426Sblueswir1 };
3499db7b5426Sblueswir1 
3500c227f099SAnthony Liguori static int subpage_register (subpage_t *mmio, uint32_t start, uint32_t end,
3501c227f099SAnthony Liguori                              ram_addr_t memory, ram_addr_t region_offset)
3502db7b5426Sblueswir1 {
3503db7b5426Sblueswir1     int idx, eidx;
3504db7b5426Sblueswir1 
3505db7b5426Sblueswir1     if (start >= TARGET_PAGE_SIZE || end >= TARGET_PAGE_SIZE)
3506db7b5426Sblueswir1         return -1;
3507db7b5426Sblueswir1     idx = SUBPAGE_IDX(start);
3508db7b5426Sblueswir1     eidx = SUBPAGE_IDX(end);
3509db7b5426Sblueswir1 #if defined(DEBUG_SUBPAGE)
35100bf9e31aSBlue Swirl     printf("%s: %p start %08x end %08x idx %08x eidx %08x mem %ld\n", __func__,
3511db7b5426Sblueswir1            mmio, start, end, idx, eidx, memory);
3512db7b5426Sblueswir1 #endif
351395c318f5SGleb Natapov     if ((memory & ~TARGET_PAGE_MASK) == IO_MEM_RAM)
351495c318f5SGleb Natapov         memory = IO_MEM_UNASSIGNED;
3515f6405247SRichard Henderson     memory = (memory >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
3516db7b5426Sblueswir1     for (; idx <= eidx; idx++) {
3517f6405247SRichard Henderson         mmio->sub_io_index[idx] = memory;
3518f6405247SRichard Henderson         mmio->region_offset[idx] = region_offset;
3519db7b5426Sblueswir1     }
3520db7b5426Sblueswir1 
3521db7b5426Sblueswir1     return 0;
3522db7b5426Sblueswir1 }
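
/*
 * Illustrative registration (all values assumed): carve a 4-byte MMIO
 * window out of an otherwise unassigned page.  SUBPAGE_IDX() maps the
 * offset within the page to a slot in sub_io_index[]/region_offset[]:
 *
 *     ram_addr_t phys;
 *     subpage_t *sp = subpage_init(base, &phys, IO_MEM_UNASSIGNED, 0);
 *     subpage_register(sp, 0x100, 0x103, my_io_index, 0);
 *
 * where my_io_index is a value returned by cpu_register_io_memory().
 * Accesses to base+0x100..0x103 are then dispatched through
 * subpage_readlen()/subpage_writelen() to that handler; subpage_init()
 * is defined below.
 */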
3523db7b5426Sblueswir1 
3524f6405247SRichard Henderson static subpage_t *subpage_init (target_phys_addr_t base, ram_addr_t *phys,
3525f6405247SRichard Henderson                                 ram_addr_t orig_memory,
3526f6405247SRichard Henderson                                 ram_addr_t region_offset)
3527db7b5426Sblueswir1 {
3528c227f099SAnthony Liguori     subpage_t *mmio;
3529db7b5426Sblueswir1     int subpage_memory;
3530db7b5426Sblueswir1 
3531c227f099SAnthony Liguori     mmio = qemu_mallocz(sizeof(subpage_t));
35321eec614bSaliguori 
3533db7b5426Sblueswir1     mmio->base = base;
35342507c12aSAlexander Graf     subpage_memory = cpu_register_io_memory(subpage_read, subpage_write, mmio,
35352507c12aSAlexander Graf                                             DEVICE_NATIVE_ENDIAN);
3536db7b5426Sblueswir1 #if defined(DEBUG_SUBPAGE)
3537db7b5426Sblueswir1     printf("%s: %p base " TARGET_FMT_plx " len %08x %d\n", __func__,
3538db7b5426Sblueswir1            mmio, base, TARGET_PAGE_SIZE, subpage_memory);
3539db7b5426Sblueswir1 #endif
3540db7b5426Sblueswir1     *phys = subpage_memory | IO_MEM_SUBPAGE;
3541f6405247SRichard Henderson     subpage_register(mmio, 0, TARGET_PAGE_SIZE-1, orig_memory, region_offset);
3542db7b5426Sblueswir1 
3543db7b5426Sblueswir1     return mmio;
3544db7b5426Sblueswir1 }
3545db7b5426Sblueswir1 
354688715657Saliguori static int get_free_io_mem_idx(void)
354788715657Saliguori {
354888715657Saliguori     int i;
354988715657Saliguori 
355088715657Saliguori     for (i = 0; i < IO_MEM_NB_ENTRIES; i++)
355188715657Saliguori         if (!io_mem_used[i]) {
355288715657Saliguori             io_mem_used[i] = 1;
355388715657Saliguori             return i;
355488715657Saliguori         }
3555c6703b47SRiku Voipio     fprintf(stderr, "Ran out of io_mem_idx, max %d!\n", IO_MEM_NB_ENTRIES);
355688715657Saliguori     return -1;
355788715657Saliguori }
355888715657Saliguori 
3559dd310534SAlexander Graf /*
3560dd310534SAlexander Graf  * Usually, devices operate in little endian mode. There are devices out
3561dd310534SAlexander Graf  * there that operate in big endian too. Each device gets byte swapped
3562dd310534SAlexander Graf  * mmio if plugged onto a CPU that does the other endianness.
3563dd310534SAlexander Graf  *
3564dd310534SAlexander Graf  * CPU          Device           swap?
3565dd310534SAlexander Graf  *
3566dd310534SAlexander Graf  * little       little           no
3567dd310534SAlexander Graf  * little       big              yes
3568dd310534SAlexander Graf  * big          little           yes
3569dd310534SAlexander Graf  * big          big              no
3570dd310534SAlexander Graf  */
3571dd310534SAlexander Graf 
3572dd310534SAlexander Graf typedef struct SwapEndianContainer {
3573dd310534SAlexander Graf     CPUReadMemoryFunc *read[3];
3574dd310534SAlexander Graf     CPUWriteMemoryFunc *write[3];
3575dd310534SAlexander Graf     void *opaque;
3576dd310534SAlexander Graf } SwapEndianContainer;
3577dd310534SAlexander Graf 
3578dd310534SAlexander Graf static uint32_t swapendian_mem_readb (void *opaque, target_phys_addr_t addr)
3579dd310534SAlexander Graf {
3580dd310534SAlexander Graf     uint32_t val;
3581dd310534SAlexander Graf     SwapEndianContainer *c = opaque;
3582dd310534SAlexander Graf     val = c->read[0](c->opaque, addr);
3583dd310534SAlexander Graf     return val;
3584dd310534SAlexander Graf }
3585dd310534SAlexander Graf 
3586dd310534SAlexander Graf static uint32_t swapendian_mem_readw(void *opaque, target_phys_addr_t addr)
3587dd310534SAlexander Graf {
3588dd310534SAlexander Graf     uint32_t val;
3589dd310534SAlexander Graf     SwapEndianContainer *c = opaque;
3590dd310534SAlexander Graf     val = bswap16(c->read[1](c->opaque, addr));
3591dd310534SAlexander Graf     return val;
3592dd310534SAlexander Graf }
3593dd310534SAlexander Graf 
3594dd310534SAlexander Graf static uint32_t swapendian_mem_readl(void *opaque, target_phys_addr_t addr)
3595dd310534SAlexander Graf {
3596dd310534SAlexander Graf     uint32_t val;
3597dd310534SAlexander Graf     SwapEndianContainer *c = opaque;
3598dd310534SAlexander Graf     val = bswap32(c->read[2](c->opaque, addr));
3599dd310534SAlexander Graf     return val;
3600dd310534SAlexander Graf }
3601dd310534SAlexander Graf 
3602dd310534SAlexander Graf static CPUReadMemoryFunc * const swapendian_readfn[3]={
3603dd310534SAlexander Graf     swapendian_mem_readb,
3604dd310534SAlexander Graf     swapendian_mem_readw,
3605dd310534SAlexander Graf     swapendian_mem_readl
3606dd310534SAlexander Graf };
3607dd310534SAlexander Graf 
3608dd310534SAlexander Graf static void swapendian_mem_writeb(void *opaque, target_phys_addr_t addr,
3609dd310534SAlexander Graf                                   uint32_t val)
3610dd310534SAlexander Graf {
3611dd310534SAlexander Graf     SwapEndianContainer *c = opaque;
3612dd310534SAlexander Graf     c->write[0](c->opaque, addr, val);
3613dd310534SAlexander Graf }
3614dd310534SAlexander Graf 
3615dd310534SAlexander Graf static void swapendian_mem_writew(void *opaque, target_phys_addr_t addr,
3616dd310534SAlexander Graf                                   uint32_t val)
3617dd310534SAlexander Graf {
3618dd310534SAlexander Graf     SwapEndianContainer *c = opaque;
3619dd310534SAlexander Graf     c->write[1](c->opaque, addr, bswap16(val));
3620dd310534SAlexander Graf }
3621dd310534SAlexander Graf 
3622dd310534SAlexander Graf static void swapendian_mem_writel(void *opaque, target_phys_addr_t addr,
3623dd310534SAlexander Graf                                   uint32_t val)
3624dd310534SAlexander Graf {
3625dd310534SAlexander Graf     SwapEndianContainer *c = opaque;
3626dd310534SAlexander Graf     c->write[2](c->opaque, addr, bswap32(val));
3627dd310534SAlexander Graf }
3628dd310534SAlexander Graf 
3629dd310534SAlexander Graf static CPUWriteMemoryFunc * const swapendian_writefn[3]={
3630dd310534SAlexander Graf     swapendian_mem_writeb,
3631dd310534SAlexander Graf     swapendian_mem_writew,
3632dd310534SAlexander Graf     swapendian_mem_writel
3633dd310534SAlexander Graf };
3634dd310534SAlexander Graf 
3635dd310534SAlexander Graf static void swapendian_init(int io_index)
3636dd310534SAlexander Graf {
3637dd310534SAlexander Graf     SwapEndianContainer *c = qemu_malloc(sizeof(SwapEndianContainer));
3638dd310534SAlexander Graf     int i;
3639dd310534SAlexander Graf 
3640dd310534SAlexander Graf     /* Swap mmio for big endian targets */
3641dd310534SAlexander Graf     c->opaque = io_mem_opaque[io_index];
3642dd310534SAlexander Graf     for (i = 0; i < 3; i++) {
3643dd310534SAlexander Graf         c->read[i] = io_mem_read[io_index][i];
3644dd310534SAlexander Graf         c->write[i] = io_mem_write[io_index][i];
3645dd310534SAlexander Graf 
3646dd310534SAlexander Graf         io_mem_read[io_index][i] = swapendian_readfn[i];
3647dd310534SAlexander Graf         io_mem_write[io_index][i] = swapendian_writefn[i];
3648dd310534SAlexander Graf     }
3649dd310534SAlexander Graf     io_mem_opaque[io_index] = c;
3650dd310534SAlexander Graf }
3651dd310534SAlexander Graf 
3652dd310534SAlexander Graf static void swapendian_del(int io_index)
3653dd310534SAlexander Graf {
3654dd310534SAlexander Graf     if (io_mem_read[io_index][0] == swapendian_readfn[0]) {
3655dd310534SAlexander Graf         qemu_free(io_mem_opaque[io_index]);
3656dd310534SAlexander Graf     }
3657dd310534SAlexander Graf }
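
/*
 * Net effect, sketched for a big-endian device on a little-endian
 * target (a "swap" row in the table above): registering with
 * DEVICE_BIG_ENDIAN runs swapendian_init(), so a guest 32-bit read is
 * routed as
 *
 *     swapendian_mem_readl(c, addr)
 *         -> bswap32(c->read[2](c->opaque, addr))
 *
 * and the device handlers keep seeing values in their native byte
 * order; c->read[2] stands for whatever readl the device registered.
 */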
3658dd310534SAlexander Graf 
365933417e70Sbellard /* mem_read and mem_write are arrays of function pointers for byte
366033417e70Sbellard    (index 0), word (index 1) and dword (index 2) accesses. Entries
36610b4e6e3eSPaul Brook    can be omitted with a NULL function pointer.
36623ee89922Sblueswir1    If io_index is non-zero, the corresponding io zone is
36634254fab8Sblueswir1    modified. If it is zero, a new io zone is allocated. The return
36644254fab8Sblueswir1    value can be used with cpu_register_physical_memory(). (-1) is
36654254fab8Sblueswir1    returned on error. */
36661eed09cbSAvi Kivity static int cpu_register_io_memory_fixed(int io_index,
3667d60efc6bSBlue Swirl                                         CPUReadMemoryFunc * const *mem_read,
3668d60efc6bSBlue Swirl                                         CPUWriteMemoryFunc * const *mem_write,
3669dd310534SAlexander Graf                                         void *opaque, enum device_endian endian)
367033417e70Sbellard {
36713cab721dSRichard Henderson     int i;
36723cab721dSRichard Henderson 
367333417e70Sbellard     if (io_index <= 0) {
367488715657Saliguori         io_index = get_free_io_mem_idx();
367588715657Saliguori         if (io_index == -1)
367688715657Saliguori             return io_index;
367733417e70Sbellard     } else {
36781eed09cbSAvi Kivity         io_index >>= IO_MEM_SHIFT;
367933417e70Sbellard         if (io_index >= IO_MEM_NB_ENTRIES)
368033417e70Sbellard             return -1;
368133417e70Sbellard     }
368233417e70Sbellard 
36833cab721dSRichard Henderson     for (i = 0; i < 3; ++i) {
36843cab721dSRichard Henderson         io_mem_read[io_index][i]
36853cab721dSRichard Henderson             = (mem_read[i] ? mem_read[i] : unassigned_mem_read[i]);
36863cab721dSRichard Henderson     }
36873cab721dSRichard Henderson     for (i = 0; i < 3; ++i) {
36883cab721dSRichard Henderson         io_mem_write[io_index][i]
36893cab721dSRichard Henderson             = (mem_write[i] ? mem_write[i] : unassigned_mem_write[i]);
36903cab721dSRichard Henderson     }
3691a4193c8aSbellard     io_mem_opaque[io_index] = opaque;
3692f6405247SRichard Henderson 
3693dd310534SAlexander Graf     switch (endian) {
3694dd310534SAlexander Graf     case DEVICE_BIG_ENDIAN:
3695dd310534SAlexander Graf #ifndef TARGET_WORDS_BIGENDIAN
3696dd310534SAlexander Graf         swapendian_init(io_index);
3697dd310534SAlexander Graf #endif
3698dd310534SAlexander Graf         break;
3699dd310534SAlexander Graf     case DEVICE_LITTLE_ENDIAN:
3700dd310534SAlexander Graf #ifdef TARGET_WORDS_BIGENDIAN
3701dd310534SAlexander Graf         swapendian_init(io_index);
3702dd310534SAlexander Graf #endif
3703dd310534SAlexander Graf         break;
3704dd310534SAlexander Graf     case DEVICE_NATIVE_ENDIAN:
3705dd310534SAlexander Graf     default:
3706dd310534SAlexander Graf         break;
3707dd310534SAlexander Graf     }
3708dd310534SAlexander Graf 
3709f6405247SRichard Henderson     return (io_index << IO_MEM_SHIFT);
371033417e70Sbellard }
371161382a50Sbellard 
3712d60efc6bSBlue Swirl int cpu_register_io_memory(CPUReadMemoryFunc * const *mem_read,
3713d60efc6bSBlue Swirl                            CPUWriteMemoryFunc * const *mem_write,
3714dd310534SAlexander Graf                            void *opaque, enum device_endian endian)
37151eed09cbSAvi Kivity {
37162507c12aSAlexander Graf     return cpu_register_io_memory_fixed(0, mem_read, mem_write, opaque, endian);
37171eed09cbSAvi Kivity }
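/*
 * Typical device registration (sketch; my_readb & co. are hypothetical
 * callbacks matching CPUReadMemoryFunc/CPUWriteMemoryFunc):
 *
 *     static CPUReadMemoryFunc * const my_read[3] = {
 *         my_readb, my_readw, my_readl,
 *     };
 *     static CPUWriteMemoryFunc * const my_write[3] = {
 *         my_writeb, my_writew, my_writel,
 *     };
 *
 *     int io = cpu_register_io_memory(my_read, my_write, dev_state,
 *                                     DEVICE_NATIVE_ENDIAN);
 *     cpu_register_physical_memory(dev_base, dev_size, io);
 *
 * cpu_unregister_io_memory(io) below undoes the registration.
 */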
37181eed09cbSAvi Kivity 
371988715657Saliguori void cpu_unregister_io_memory(int io_table_address)
372088715657Saliguori {
372188715657Saliguori     int i;
372288715657Saliguori     int io_index = io_table_address >> IO_MEM_SHIFT;
372388715657Saliguori 
3724dd310534SAlexander Graf     swapendian_del(io_index);
3725dd310534SAlexander Graf 
372688715657Saliguori     for (i = 0; i < 3; i++) {
372788715657Saliguori         io_mem_read[io_index][i] = unassigned_mem_read[i];
372888715657Saliguori         io_mem_write[io_index][i] = unassigned_mem_write[i];
372988715657Saliguori     }
373088715657Saliguori     io_mem_opaque[io_index] = NULL;
373188715657Saliguori     io_mem_used[io_index] = 0;
373288715657Saliguori }
373388715657Saliguori 
3734e9179ce1SAvi Kivity static void io_mem_init(void)
3735e9179ce1SAvi Kivity {
3736e9179ce1SAvi Kivity     int i;
3737e9179ce1SAvi Kivity 
37382507c12aSAlexander Graf     cpu_register_io_memory_fixed(IO_MEM_ROM, error_mem_read,
37392507c12aSAlexander Graf                                  unassigned_mem_write, NULL,
37402507c12aSAlexander Graf                                  DEVICE_NATIVE_ENDIAN);
37412507c12aSAlexander Graf     cpu_register_io_memory_fixed(IO_MEM_UNASSIGNED, unassigned_mem_read,
37422507c12aSAlexander Graf                                  unassigned_mem_write, NULL,
37432507c12aSAlexander Graf                                  DEVICE_NATIVE_ENDIAN);
37442507c12aSAlexander Graf     cpu_register_io_memory_fixed(IO_MEM_NOTDIRTY, error_mem_read,
37452507c12aSAlexander Graf                                  notdirty_mem_write, NULL,
37462507c12aSAlexander Graf                                  DEVICE_NATIVE_ENDIAN);
3747e9179ce1SAvi Kivity     for (i = 0; i < 5; i++)
3748e9179ce1SAvi Kivity         io_mem_used[i] = 1;
3749e9179ce1SAvi Kivity 
3750e9179ce1SAvi Kivity     io_mem_watch = cpu_register_io_memory(watch_mem_read,
37512507c12aSAlexander Graf                                           watch_mem_write, NULL,
37522507c12aSAlexander Graf                                           DEVICE_NATIVE_ENDIAN);
3753e9179ce1SAvi Kivity }
3754e9179ce1SAvi Kivity 
3755e2eef170Spbrook #endif /* !defined(CONFIG_USER_ONLY) */
3756e2eef170Spbrook 
375713eb76e0Sbellard /* physical memory access (slow version, mainly for debug) */
375813eb76e0Sbellard #if defined(CONFIG_USER_ONLY)
3759a68fe89cSPaul Brook int cpu_memory_rw_debug(CPUState *env, target_ulong addr,
3760a68fe89cSPaul Brook                         uint8_t *buf, int len, int is_write)
376113eb76e0Sbellard {
376213eb76e0Sbellard     int l, flags;
376313eb76e0Sbellard     target_ulong page;
376453a5960aSpbrook     void * p;
376513eb76e0Sbellard 
376613eb76e0Sbellard     while (len > 0) {
376713eb76e0Sbellard         page = addr & TARGET_PAGE_MASK;
376813eb76e0Sbellard         l = (page + TARGET_PAGE_SIZE) - addr;
376913eb76e0Sbellard         if (l > len)
377013eb76e0Sbellard             l = len;
377113eb76e0Sbellard         flags = page_get_flags(page);
377213eb76e0Sbellard         if (!(flags & PAGE_VALID))
3773a68fe89cSPaul Brook             return -1;
377413eb76e0Sbellard         if (is_write) {
377513eb76e0Sbellard             if (!(flags & PAGE_WRITE))
3776a68fe89cSPaul Brook                 return -1;
3777579a97f7Sbellard             /* XXX: this code should not depend on lock_user */
377872fb7daaSaurel32             if (!(p = lock_user(VERIFY_WRITE, addr, l, 0)))
3779a68fe89cSPaul Brook                 return -1;
378072fb7daaSaurel32             memcpy(p, buf, l);
378172fb7daaSaurel32             unlock_user(p, addr, l);
378213eb76e0Sbellard         } else {
378313eb76e0Sbellard             if (!(flags & PAGE_READ))
3784a68fe89cSPaul Brook                 return -1;
3785579a97f7Sbellard             /* XXX: this code should not depend on lock_user */
378672fb7daaSaurel32             if (!(p = lock_user(VERIFY_READ, addr, l, 1)))
3787a68fe89cSPaul Brook                 return -1;
378872fb7daaSaurel32             memcpy(buf, p, l);
37895b257578Saurel32             unlock_user(p, addr, 0);
379013eb76e0Sbellard         }
379113eb76e0Sbellard         len -= l;
379213eb76e0Sbellard         buf += l;
379313eb76e0Sbellard         addr += l;
379413eb76e0Sbellard     }
3795a68fe89cSPaul Brook     return 0;
379613eb76e0Sbellard }
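
/*
 * Typical caller (sketch): the gdb stub reading guest memory in
 * user-mode emulation; a negative return means an unmapped or
 * protected page:
 *
 *     uint8_t insn[4];
 *     if (cpu_memory_rw_debug(env, pc, insn, sizeof(insn), 0) < 0) {
 *         /- report the fault to the debugger -/
 *     }
 */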
37978df1cd07Sbellard 
379813eb76e0Sbellard #else
3799c227f099SAnthony Liguori void cpu_physical_memory_rw(target_phys_addr_t addr, uint8_t *buf,
380013eb76e0Sbellard                             int len, int is_write)
380113eb76e0Sbellard {
380213eb76e0Sbellard     int l, io_index;
380313eb76e0Sbellard     uint8_t *ptr;
380413eb76e0Sbellard     uint32_t val;
3805c227f099SAnthony Liguori     target_phys_addr_t page;
38062e12669aSbellard     unsigned long pd;
380792e873b9Sbellard     PhysPageDesc *p;
380813eb76e0Sbellard 
380913eb76e0Sbellard     while (len > 0) {
381013eb76e0Sbellard         page = addr & TARGET_PAGE_MASK;
381113eb76e0Sbellard         l = (page + TARGET_PAGE_SIZE) - addr;
381213eb76e0Sbellard         if (l > len)
381313eb76e0Sbellard             l = len;
381492e873b9Sbellard         p = phys_page_find(page >> TARGET_PAGE_BITS);
381513eb76e0Sbellard         if (!p) {
381613eb76e0Sbellard             pd = IO_MEM_UNASSIGNED;
381713eb76e0Sbellard         } else {
381813eb76e0Sbellard             pd = p->phys_offset;
381913eb76e0Sbellard         }
382013eb76e0Sbellard 
382113eb76e0Sbellard         if (is_write) {
38223a7d929eSbellard             if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
3823c227f099SAnthony Liguori                 target_phys_addr_t addr1 = addr;
382413eb76e0Sbellard                 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
38258da3ff18Spbrook                 if (p)
38266c2934dbSaurel32                     addr1 = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
38276a00d601Sbellard                 /* XXX: could force cpu_single_env to NULL to avoid
38286a00d601Sbellard                    potential bugs */
38296c2934dbSaurel32                 if (l >= 4 && ((addr1 & 3) == 0)) {
38301c213d19Sbellard                     /* 32 bit write access */
3831c27004ecSbellard                     val = ldl_p(buf);
38326c2934dbSaurel32                     io_mem_write[io_index][2](io_mem_opaque[io_index], addr1, val);
383313eb76e0Sbellard                     l = 4;
38346c2934dbSaurel32                 } else if (l >= 2 && ((addr1 & 1) == 0)) {
38351c213d19Sbellard                     /* 16 bit write access */
3836c27004ecSbellard                     val = lduw_p(buf);
38376c2934dbSaurel32                     io_mem_write[io_index][1](io_mem_opaque[io_index], addr1, val);
383813eb76e0Sbellard                     l = 2;
383913eb76e0Sbellard                 } else {
38401c213d19Sbellard                     /* 8 bit write access */
3841c27004ecSbellard                     val = ldub_p(buf);
38426c2934dbSaurel32                     io_mem_write[io_index][0](io_mem_opaque[io_index], addr1, val);
384313eb76e0Sbellard                     l = 1;
384413eb76e0Sbellard                 }
384513eb76e0Sbellard             } else {
3846b448f2f3Sbellard                 unsigned long addr1;
3847b448f2f3Sbellard                 addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
384813eb76e0Sbellard                 /* RAM case */
38495579c7f3Spbrook                 ptr = qemu_get_ram_ptr(addr1);
385013eb76e0Sbellard                 memcpy(ptr, buf, l);
38513a7d929eSbellard                 if (!cpu_physical_memory_is_dirty(addr1)) {
3852b448f2f3Sbellard                     /* invalidate code */
3853b448f2f3Sbellard                     tb_invalidate_phys_page_range(addr1, addr1 + l, 0);
3854b448f2f3Sbellard                     /* set dirty bit */
3855f7c11b53SYoshiaki Tamura                     cpu_physical_memory_set_dirty_flags(
3856f7c11b53SYoshiaki Tamura                         addr1, (0xff & ~CODE_DIRTY_FLAG));
385713eb76e0Sbellard                 }
3858050a0ddfSAnthony PERARD                 qemu_put_ram_ptr(ptr);
38593a7d929eSbellard             }
386013eb76e0Sbellard         } else {
38612a4188a3Sbellard             if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
38622a4188a3Sbellard                 !(pd & IO_MEM_ROMD)) {
3863c227f099SAnthony Liguori                 target_phys_addr_t addr1 = addr;
386413eb76e0Sbellard                 /* I/O case */
386513eb76e0Sbellard                 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
38668da3ff18Spbrook                 if (p)
38676c2934dbSaurel32                     addr1 = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
38686c2934dbSaurel32                 if (l >= 4 && ((addr1 & 3) == 0)) {
386913eb76e0Sbellard                     /* 32 bit read access */
38706c2934dbSaurel32                     val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr1);
3871c27004ecSbellard                     stl_p(buf, val);
387213eb76e0Sbellard                     l = 4;
38736c2934dbSaurel32                 } else if (l >= 2 && ((addr1 & 1) == 0)) {
387413eb76e0Sbellard                     /* 16 bit read access */
38756c2934dbSaurel32                     val = io_mem_read[io_index][1](io_mem_opaque[io_index], addr1);
3876c27004ecSbellard                     stw_p(buf, val);
387713eb76e0Sbellard                     l = 2;
387813eb76e0Sbellard                 } else {
38791c213d19Sbellard                     /* 8 bit read access */
38806c2934dbSaurel32                     val = io_mem_read[io_index][0](io_mem_opaque[io_index], addr1);
3881c27004ecSbellard                     stb_p(buf, val);
388213eb76e0Sbellard                     l = 1;
388313eb76e0Sbellard                 }
388413eb76e0Sbellard             } else {
388513eb76e0Sbellard                 /* RAM case */
3886050a0ddfSAnthony PERARD                 ptr = qemu_get_ram_ptr(pd & TARGET_PAGE_MASK);
3887050a0ddfSAnthony PERARD                 memcpy(buf, ptr + (addr & ~TARGET_PAGE_MASK), l);
3888050a0ddfSAnthony PERARD                 qemu_put_ram_ptr(ptr);
388913eb76e0Sbellard             }
389013eb76e0Sbellard         }
389113eb76e0Sbellard         len -= l;
389213eb76e0Sbellard         buf += l;
389313eb76e0Sbellard         addr += l;
389413eb76e0Sbellard     }
389513eb76e0Sbellard }
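
/*
 * Example (addresses and sizes illustrative): copy a guest-physical
 * range into a host buffer; the loop above splits the transfer at page
 * boundaries and routes MMIO pages through io_mem_read[]/io_mem_write[]:
 *
 *     uint8_t buf[64];
 *     cpu_physical_memory_rw(0x1000, buf, sizeof(buf), 0);  // read
 *     cpu_physical_memory_rw(0x1000, buf, sizeof(buf), 1);  // write
 *
 * cpu_physical_memory_read()/cpu_physical_memory_write() are the usual
 * convenience wrappers around this function.
 */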
38968df1cd07Sbellard 
3897d0ecd2aaSbellard /* used for ROM loading: can write in RAM and ROM */
3898c227f099SAnthony Liguori void cpu_physical_memory_write_rom(target_phys_addr_t addr,
3899d0ecd2aaSbellard                                    const uint8_t *buf, int len)
3900d0ecd2aaSbellard {
3901d0ecd2aaSbellard     int l;
3902d0ecd2aaSbellard     uint8_t *ptr;
3903c227f099SAnthony Liguori     target_phys_addr_t page;
3904d0ecd2aaSbellard     unsigned long pd;
3905d0ecd2aaSbellard     PhysPageDesc *p;
3906d0ecd2aaSbellard 
3907d0ecd2aaSbellard     while (len > 0) {
3908d0ecd2aaSbellard         page = addr & TARGET_PAGE_MASK;
3909d0ecd2aaSbellard         l = (page + TARGET_PAGE_SIZE) - addr;
3910d0ecd2aaSbellard         if (l > len)
3911d0ecd2aaSbellard             l = len;
3912d0ecd2aaSbellard         p = phys_page_find(page >> TARGET_PAGE_BITS);
3913d0ecd2aaSbellard         if (!p) {
3914d0ecd2aaSbellard             pd = IO_MEM_UNASSIGNED;
3915d0ecd2aaSbellard         } else {
3916d0ecd2aaSbellard             pd = p->phys_offset;
3917d0ecd2aaSbellard         }
3918d0ecd2aaSbellard 
3919d0ecd2aaSbellard         if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM &&
39202a4188a3Sbellard             (pd & ~TARGET_PAGE_MASK) != IO_MEM_ROM &&
39212a4188a3Sbellard             !(pd & IO_MEM_ROMD)) {
3922d0ecd2aaSbellard             /* do nothing */
3923d0ecd2aaSbellard         } else {
3924d0ecd2aaSbellard             unsigned long addr1;
3925d0ecd2aaSbellard             addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
3926d0ecd2aaSbellard             /* ROM/RAM case */
39275579c7f3Spbrook             ptr = qemu_get_ram_ptr(addr1);
3928d0ecd2aaSbellard             memcpy(ptr, buf, l);
3929050a0ddfSAnthony PERARD             qemu_put_ram_ptr(ptr);
3930d0ecd2aaSbellard         }
3931d0ecd2aaSbellard         len -= l;
3932d0ecd2aaSbellard         buf += l;
3933d0ecd2aaSbellard         addr += l;
3934d0ecd2aaSbellard     }
3935d0ecd2aaSbellard }
3936d0ecd2aaSbellard 
39376d16c2f8Saliguori typedef struct {
39386d16c2f8Saliguori     void *buffer;
3939c227f099SAnthony Liguori     target_phys_addr_t addr;
3940c227f099SAnthony Liguori     target_phys_addr_t len;
39416d16c2f8Saliguori } BounceBuffer;
39426d16c2f8Saliguori 
39436d16c2f8Saliguori static BounceBuffer bounce;
39446d16c2f8Saliguori 
3945ba223c29Saliguori typedef struct MapClient {
3946ba223c29Saliguori     void *opaque;
3947ba223c29Saliguori     void (*callback)(void *opaque);
394872cf2d4fSBlue Swirl     QLIST_ENTRY(MapClient) link;
3949ba223c29Saliguori } MapClient;
3950ba223c29Saliguori 
395172cf2d4fSBlue Swirl static QLIST_HEAD(map_client_list, MapClient) map_client_list
395272cf2d4fSBlue Swirl     = QLIST_HEAD_INITIALIZER(map_client_list);
3953ba223c29Saliguori 
3954ba223c29Saliguori void *cpu_register_map_client(void *opaque, void (*callback)(void *opaque))
3955ba223c29Saliguori {
3956ba223c29Saliguori     MapClient *client = qemu_malloc(sizeof(*client));
3957ba223c29Saliguori 
3958ba223c29Saliguori     client->opaque = opaque;
3959ba223c29Saliguori     client->callback = callback;
396072cf2d4fSBlue Swirl     QLIST_INSERT_HEAD(&map_client_list, client, link);
3961ba223c29Saliguori     return client;
3962ba223c29Saliguori }
3963ba223c29Saliguori 
3964ba223c29Saliguori void cpu_unregister_map_client(void *_client)
3965ba223c29Saliguori {
3966ba223c29Saliguori     MapClient *client = (MapClient *)_client;
3967ba223c29Saliguori 
396872cf2d4fSBlue Swirl     QLIST_REMOVE(client, link);
396934d5e948SIsaku Yamahata     qemu_free(client);
3970ba223c29Saliguori }
3971ba223c29Saliguori 
3972ba223c29Saliguori static void cpu_notify_map_clients(void)
3973ba223c29Saliguori {
3974ba223c29Saliguori     MapClient *client;
3975ba223c29Saliguori 
397672cf2d4fSBlue Swirl     while (!QLIST_EMPTY(&map_client_list)) {
397772cf2d4fSBlue Swirl         client = QLIST_FIRST(&map_client_list);
3978ba223c29Saliguori         client->callback(client->opaque);
397934d5e948SIsaku Yamahata         cpu_unregister_map_client(client);
3980ba223c29Saliguori     }
3981ba223c29Saliguori }
3982ba223c29Saliguori 
39836d16c2f8Saliguori /* Map a physical memory region into a host virtual address.
39846d16c2f8Saliguori  * May map a subset of the requested range, given by and returned in *plen.
39856d16c2f8Saliguori  * May return NULL if resources needed to perform the mapping are exhausted.
39866d16c2f8Saliguori  * Use only for reads OR writes - not for read-modify-write operations.
3987ba223c29Saliguori  * Use cpu_register_map_client() to know when retrying the map operation is
3988ba223c29Saliguori  * likely to succeed.
39896d16c2f8Saliguori  */
3990c227f099SAnthony Liguori void *cpu_physical_memory_map(target_phys_addr_t addr,
3991c227f099SAnthony Liguori                               target_phys_addr_t *plen,
39926d16c2f8Saliguori                               int is_write)
39936d16c2f8Saliguori {
3994c227f099SAnthony Liguori     target_phys_addr_t len = *plen;
3995c227f099SAnthony Liguori     target_phys_addr_t done = 0;
39966d16c2f8Saliguori     int l;
39976d16c2f8Saliguori     uint8_t *ret = NULL;
39986d16c2f8Saliguori     uint8_t *ptr;
3999c227f099SAnthony Liguori     target_phys_addr_t page;
40006d16c2f8Saliguori     unsigned long pd;
40016d16c2f8Saliguori     PhysPageDesc *p;
40026d16c2f8Saliguori     unsigned long addr1;
40036d16c2f8Saliguori 
40046d16c2f8Saliguori     while (len > 0) {
40056d16c2f8Saliguori         page = addr & TARGET_PAGE_MASK;
40066d16c2f8Saliguori         l = (page + TARGET_PAGE_SIZE) - addr;
40076d16c2f8Saliguori         if (l > len)
40086d16c2f8Saliguori             l = len;
40096d16c2f8Saliguori         p = phys_page_find(page >> TARGET_PAGE_BITS);
40106d16c2f8Saliguori         if (!p) {
40116d16c2f8Saliguori             pd = IO_MEM_UNASSIGNED;
40126d16c2f8Saliguori         } else {
40136d16c2f8Saliguori             pd = p->phys_offset;
40146d16c2f8Saliguori         }
40156d16c2f8Saliguori 
40166d16c2f8Saliguori         if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
40176d16c2f8Saliguori             if (done || bounce.buffer) {
40186d16c2f8Saliguori                 break;
40196d16c2f8Saliguori             }
40206d16c2f8Saliguori             bounce.buffer = qemu_memalign(TARGET_PAGE_SIZE, TARGET_PAGE_SIZE);
40216d16c2f8Saliguori             bounce.addr = addr;
40226d16c2f8Saliguori             bounce.len = l;
40236d16c2f8Saliguori             if (!is_write) {
402454f7b4a3SStefan Weil                 cpu_physical_memory_read(addr, bounce.buffer, l);
40256d16c2f8Saliguori             }
40266d16c2f8Saliguori             ptr = bounce.buffer;
40276d16c2f8Saliguori         } else {
40286d16c2f8Saliguori             addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
40295579c7f3Spbrook             ptr = qemu_get_ram_ptr(addr1);
40306d16c2f8Saliguori         }
40316d16c2f8Saliguori         if (!done) {
40326d16c2f8Saliguori             ret = ptr;
40336d16c2f8Saliguori         } else if (ret + done != ptr) {
40346d16c2f8Saliguori             break;
40356d16c2f8Saliguori         }
40366d16c2f8Saliguori 
40376d16c2f8Saliguori         len -= l;
40386d16c2f8Saliguori         addr += l;
40396d16c2f8Saliguori         done += l;
40406d16c2f8Saliguori     }
40416d16c2f8Saliguori     *plen = done;
40426d16c2f8Saliguori     return ret;
40436d16c2f8Saliguori }
40446d16c2f8Saliguori 
40456d16c2f8Saliguori /* Unmaps a memory region previously mapped by cpu_physical_memory_map().
40466d16c2f8Saliguori  * Will also mark the memory as dirty if is_write == 1.  access_len gives
40476d16c2f8Saliguori  * the amount of memory that was actually read or written by the caller.
40486d16c2f8Saliguori  */
4049c227f099SAnthony Liguori void cpu_physical_memory_unmap(void *buffer, target_phys_addr_t len,
4050c227f099SAnthony Liguori                                int is_write, target_phys_addr_t access_len)
40516d16c2f8Saliguori {
40526d16c2f8Saliguori     if (buffer != bounce.buffer) {
40536d16c2f8Saliguori         if (is_write) {
4054e890261fSMarcelo Tosatti             ram_addr_t addr1 = qemu_ram_addr_from_host_nofail(buffer);
40556d16c2f8Saliguori             while (access_len) {
40566d16c2f8Saliguori                 unsigned l;
40576d16c2f8Saliguori                 l = TARGET_PAGE_SIZE;
40586d16c2f8Saliguori                 if (l > access_len)
40596d16c2f8Saliguori                     l = access_len;
40606d16c2f8Saliguori                 if (!cpu_physical_memory_is_dirty(addr1)) {
40616d16c2f8Saliguori                     /* invalidate code */
40626d16c2f8Saliguori                     tb_invalidate_phys_page_range(addr1, addr1 + l, 0);
40636d16c2f8Saliguori                     /* set dirty bit */
4064f7c11b53SYoshiaki Tamura                     cpu_physical_memory_set_dirty_flags(
4065f7c11b53SYoshiaki Tamura                         addr1, (0xff & ~CODE_DIRTY_FLAG));
40666d16c2f8Saliguori                 }
40676d16c2f8Saliguori                 addr1 += l;
40686d16c2f8Saliguori                 access_len -= l;
40696d16c2f8Saliguori             }
40706d16c2f8Saliguori         }
4071050a0ddfSAnthony PERARD         if (xen_mapcache_enabled()) {
4072050a0ddfSAnthony PERARD             uint8_t *buffer1 = buffer;
4073050a0ddfSAnthony PERARD             uint8_t *end_buffer = buffer + len;
4074050a0ddfSAnthony PERARD 
4075050a0ddfSAnthony PERARD             while (buffer1 < end_buffer) {
4076050a0ddfSAnthony PERARD                 qemu_put_ram_ptr(buffer1);
4077050a0ddfSAnthony PERARD                 buffer1 += TARGET_PAGE_SIZE;
4078050a0ddfSAnthony PERARD             }
4079050a0ddfSAnthony PERARD         }
40806d16c2f8Saliguori         return;
40816d16c2f8Saliguori     }
40826d16c2f8Saliguori     if (is_write) {
40836d16c2f8Saliguori         cpu_physical_memory_write(bounce.addr, bounce.buffer, access_len);
40846d16c2f8Saliguori     }
4085f8a83245SHerve Poussineau     qemu_vfree(bounce.buffer);
40866d16c2f8Saliguori     bounce.buffer = NULL;
4087ba223c29Saliguori     cpu_notify_map_clients();
40886d16c2f8Saliguori }
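
/*
 * Canonical map/unmap pattern (sketch; retry_cb/opaque are caller-
 * supplied).  Because there is a single shared bounce buffer, a failed
 * map is retried via a map client once the buffer is released:
 *
 *     target_phys_addr_t plen = len;
 *     void *p = cpu_physical_memory_map(addr, &plen, is_write);
 *     if (!p) {
 *         cpu_register_map_client(opaque, retry_cb);
 *         return;
 *     }
 *     ... DMA into/out of p[0..plen) ...
 *     cpu_physical_memory_unmap(p, plen, is_write, plen);
 *
 * Note that *plen may come back smaller than requested, so callers
 * must loop over the remainder.
 */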
4089d0ecd2aaSbellard 
40908df1cd07Sbellard /* warning: addr must be aligned */
4091c227f099SAnthony Liguori uint32_t ldl_phys(target_phys_addr_t addr)
40928df1cd07Sbellard {
40938df1cd07Sbellard     int io_index;
40948df1cd07Sbellard     uint8_t *ptr;
40958df1cd07Sbellard     uint32_t val;
40968df1cd07Sbellard     unsigned long pd;
40978df1cd07Sbellard     PhysPageDesc *p;
40988df1cd07Sbellard 
40998df1cd07Sbellard     p = phys_page_find(addr >> TARGET_PAGE_BITS);
41008df1cd07Sbellard     if (!p) {
41018df1cd07Sbellard         pd = IO_MEM_UNASSIGNED;
41028df1cd07Sbellard     } else {
41038df1cd07Sbellard         pd = p->phys_offset;
41048df1cd07Sbellard     }
41058df1cd07Sbellard 
41062a4188a3Sbellard     if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
41072a4188a3Sbellard         !(pd & IO_MEM_ROMD)) {
41088df1cd07Sbellard         /* I/O case */
41098df1cd07Sbellard         io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
41108da3ff18Spbrook         if (p)
41118da3ff18Spbrook             addr = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
41128df1cd07Sbellard         val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr);
41138df1cd07Sbellard     } else {
41148df1cd07Sbellard         /* RAM case */
41155579c7f3Spbrook         ptr = qemu_get_ram_ptr(pd & TARGET_PAGE_MASK) +
41168df1cd07Sbellard             (addr & ~TARGET_PAGE_MASK);
41178df1cd07Sbellard         val = ldl_p(ptr);
41188df1cd07Sbellard     }
41198df1cd07Sbellard     return val;
41208df1cd07Sbellard }
41218df1cd07Sbellard 
412284b7b8e7Sbellard /* warning: addr must be aligned */
4123c227f099SAnthony Liguori uint64_t ldq_phys(target_phys_addr_t addr)
412484b7b8e7Sbellard {
412584b7b8e7Sbellard     int io_index;
412684b7b8e7Sbellard     uint8_t *ptr;
412784b7b8e7Sbellard     uint64_t val;
412884b7b8e7Sbellard     unsigned long pd;
412984b7b8e7Sbellard     PhysPageDesc *p;
413084b7b8e7Sbellard 
413184b7b8e7Sbellard     p = phys_page_find(addr >> TARGET_PAGE_BITS);
413284b7b8e7Sbellard     if (!p) {
413384b7b8e7Sbellard         pd = IO_MEM_UNASSIGNED;
413484b7b8e7Sbellard     } else {
413584b7b8e7Sbellard         pd = p->phys_offset;
413684b7b8e7Sbellard     }
413784b7b8e7Sbellard 
41382a4188a3Sbellard     if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
41392a4188a3Sbellard         !(pd & IO_MEM_ROMD)) {
414084b7b8e7Sbellard         /* I/O case */
414184b7b8e7Sbellard         io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
41428da3ff18Spbrook         if (p)
41438da3ff18Spbrook             addr = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
414484b7b8e7Sbellard #ifdef TARGET_WORDS_BIGENDIAN
414584b7b8e7Sbellard         val = (uint64_t)io_mem_read[io_index][2](io_mem_opaque[io_index], addr) << 32;
414684b7b8e7Sbellard         val |= io_mem_read[io_index][2](io_mem_opaque[io_index], addr + 4);
414784b7b8e7Sbellard #else
414884b7b8e7Sbellard         val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr);
414984b7b8e7Sbellard         val |= (uint64_t)io_mem_read[io_index][2](io_mem_opaque[io_index], addr + 4) << 32;
415084b7b8e7Sbellard #endif
415184b7b8e7Sbellard     } else {
415284b7b8e7Sbellard         /* RAM case */
41535579c7f3Spbrook         ptr = qemu_get_ram_ptr(pd & TARGET_PAGE_MASK) +
415484b7b8e7Sbellard             (addr & ~TARGET_PAGE_MASK);
415584b7b8e7Sbellard         val = ldq_p(ptr);
415684b7b8e7Sbellard     }
415784b7b8e7Sbellard     return val;
415884b7b8e7Sbellard }
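
/*
 * The 64-bit I/O case above is synthesized from two 32-bit reads; for a
 * big-endian target reading the (illustrative) value 0x1122334455667788:
 *
 *     io_mem_read[..][2](addr)     -> 0x11223344, shifted left by 32
 *     io_mem_read[..][2](addr + 4) -> 0x55667788, OR-ed into the low half
 */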
415984b7b8e7Sbellard 
4160aab33094Sbellard /* XXX: optimize */
4161c227f099SAnthony Liguori uint32_t ldub_phys(target_phys_addr_t addr)
4162aab33094Sbellard {
4163aab33094Sbellard     uint8_t val;
4164aab33094Sbellard     cpu_physical_memory_read(addr, &val, 1);
4165aab33094Sbellard     return val;
4166aab33094Sbellard }
4167aab33094Sbellard 
4168733f0b02SMichael S. Tsirkin /* warning: addr must be aligned */
4169c227f099SAnthony Liguori uint32_t lduw_phys(target_phys_addr_t addr)
4170aab33094Sbellard {
4171733f0b02SMichael S. Tsirkin     int io_index;
4172733f0b02SMichael S. Tsirkin     uint8_t *ptr;
4173733f0b02SMichael S. Tsirkin     uint32_t val;
4174733f0b02SMichael S. Tsirkin     unsigned long pd;
4175733f0b02SMichael S. Tsirkin     PhysPageDesc *p;
4176733f0b02SMichael S. Tsirkin 
4177733f0b02SMichael S. Tsirkin     p = phys_page_find(addr >> TARGET_PAGE_BITS);
4178733f0b02SMichael S. Tsirkin     if (!p) {
4179733f0b02SMichael S. Tsirkin         pd = IO_MEM_UNASSIGNED;
4180733f0b02SMichael S. Tsirkin     } else {
4181733f0b02SMichael S. Tsirkin         pd = p->phys_offset;
4182733f0b02SMichael S. Tsirkin     }
4183733f0b02SMichael S. Tsirkin 
4184733f0b02SMichael S. Tsirkin     if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
4185733f0b02SMichael S. Tsirkin         !(pd & IO_MEM_ROMD)) {
4186733f0b02SMichael S. Tsirkin         /* I/O case */
4187733f0b02SMichael S. Tsirkin         io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
4188733f0b02SMichael S. Tsirkin         if (p)
4189733f0b02SMichael S. Tsirkin             addr = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
4190733f0b02SMichael S. Tsirkin         val = io_mem_read[io_index][1](io_mem_opaque[io_index], addr);
4191733f0b02SMichael S. Tsirkin     } else {
4192733f0b02SMichael S. Tsirkin         /* RAM case */
4193733f0b02SMichael S. Tsirkin         ptr = qemu_get_ram_ptr(pd & TARGET_PAGE_MASK) +
4194733f0b02SMichael S. Tsirkin             (addr & ~TARGET_PAGE_MASK);
4195733f0b02SMichael S. Tsirkin         val = lduw_p(ptr);
4196733f0b02SMichael S. Tsirkin     }
4197733f0b02SMichael S. Tsirkin     return val;
4198aab33094Sbellard }
4199aab33094Sbellard 
42008df1cd07Sbellard /* warning: addr must be aligned. The ram page is not marked as dirty
42018df1cd07Sbellard    and the code inside is not invalidated. It is useful if the dirty
42028df1cd07Sbellard    bits are used to track modified PTEs */
4203c227f099SAnthony Liguori void stl_phys_notdirty(target_phys_addr_t addr, uint32_t val)
42048df1cd07Sbellard {
42058df1cd07Sbellard     int io_index;
42068df1cd07Sbellard     uint8_t *ptr;
42078df1cd07Sbellard     unsigned long pd;
42088df1cd07Sbellard     PhysPageDesc *p;
42098df1cd07Sbellard 
42108df1cd07Sbellard     p = phys_page_find(addr >> TARGET_PAGE_BITS);
42118df1cd07Sbellard     if (!p) {
42128df1cd07Sbellard         pd = IO_MEM_UNASSIGNED;
42138df1cd07Sbellard     } else {
42148df1cd07Sbellard         pd = p->phys_offset;
42158df1cd07Sbellard     }
42168df1cd07Sbellard 
42173a7d929eSbellard     if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
42188df1cd07Sbellard         io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
42198da3ff18Spbrook         if (p)
42208da3ff18Spbrook             addr = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
42218df1cd07Sbellard         io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
42228df1cd07Sbellard     } else {
422374576198Saliguori         unsigned long addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
42245579c7f3Spbrook         ptr = qemu_get_ram_ptr(addr1);
42258df1cd07Sbellard         stl_p(ptr, val);
422674576198Saliguori 
422774576198Saliguori         if (unlikely(in_migration)) {
422874576198Saliguori             if (!cpu_physical_memory_is_dirty(addr1)) {
422974576198Saliguori                 /* invalidate code */
423074576198Saliguori                 tb_invalidate_phys_page_range(addr1, addr1 + 4, 0);
423174576198Saliguori                 /* set dirty bit */
4232f7c11b53SYoshiaki Tamura                 cpu_physical_memory_set_dirty_flags(
4233f7c11b53SYoshiaki Tamura                     addr1, (0xff & ~CODE_DIRTY_FLAG));
423474576198Saliguori             }
423574576198Saliguori         }
42368df1cd07Sbellard     }
42378df1cd07Sbellard }
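
/*
 * Sketch of the intended use: a target MMU helper updating
 * accessed/dirty bits in a guest page table entry without flushing
 * translated code (PG_ACCESSED_MASK is illustrative):
 *
 *     uint32_t pte = ldl_phys(pte_addr);
 *     pte |= PG_ACCESSED_MASK;
 *     stl_phys_notdirty(pte_addr, pte);
 */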
42388df1cd07Sbellard 
4239c227f099SAnthony Liguori void stq_phys_notdirty(target_phys_addr_t addr, uint64_t val)
4240bc98a7efSj_mayer {
4241bc98a7efSj_mayer     int io_index;
4242bc98a7efSj_mayer     uint8_t *ptr;
4243bc98a7efSj_mayer     unsigned long pd;
4244bc98a7efSj_mayer     PhysPageDesc *p;
4245bc98a7efSj_mayer 
4246bc98a7efSj_mayer     p = phys_page_find(addr >> TARGET_PAGE_BITS);
4247bc98a7efSj_mayer     if (!p) {
4248bc98a7efSj_mayer         pd = IO_MEM_UNASSIGNED;
4249bc98a7efSj_mayer     } else {
4250bc98a7efSj_mayer         pd = p->phys_offset;
4251bc98a7efSj_mayer     }
4252bc98a7efSj_mayer 
4253bc98a7efSj_mayer     if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
4254bc98a7efSj_mayer         io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
42558da3ff18Spbrook         if (p)
42568da3ff18Spbrook             addr = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
4257bc98a7efSj_mayer #ifdef TARGET_WORDS_BIGENDIAN
4258bc98a7efSj_mayer         io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val >> 32);
4259bc98a7efSj_mayer         io_mem_write[io_index][2](io_mem_opaque[io_index], addr + 4, val);
4260bc98a7efSj_mayer #else
4261bc98a7efSj_mayer         io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
4262bc98a7efSj_mayer         io_mem_write[io_index][2](io_mem_opaque[io_index], addr + 4, val >> 32);
4263bc98a7efSj_mayer #endif
4264bc98a7efSj_mayer     } else {
42655579c7f3Spbrook         ptr = qemu_get_ram_ptr(pd & TARGET_PAGE_MASK) +
4266bc98a7efSj_mayer             (addr & ~TARGET_PAGE_MASK);
4267bc98a7efSj_mayer         stq_p(ptr, val);
4268bc98a7efSj_mayer     }
4269bc98a7efSj_mayer }
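
/* Worked example for the I/O path above: with a big-endian target,
   stq_phys_notdirty(A, 0x0011223344556677ULL) on an I/O page issues two
   32-bit accesses, most significant half first:
       io_mem_write[io_index][2](opaque, A,     0x00112233);
       io_mem_write[io_index][2](opaque, A + 4, 0x44556677);
   A little-endian target issues the halves in the opposite order. */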
4270bc98a7efSj_mayer 
42718df1cd07Sbellard /* warning: addr must be aligned */
4272c227f099SAnthony Liguori void stl_phys(target_phys_addr_t addr, uint32_t val)
42738df1cd07Sbellard {
42748df1cd07Sbellard     int io_index;
42758df1cd07Sbellard     uint8_t *ptr;
42768df1cd07Sbellard     unsigned long pd;
42778df1cd07Sbellard     PhysPageDesc *p;
42788df1cd07Sbellard 
42798df1cd07Sbellard     p = phys_page_find(addr >> TARGET_PAGE_BITS);
42808df1cd07Sbellard     if (!p) {
42818df1cd07Sbellard         pd = IO_MEM_UNASSIGNED;
42828df1cd07Sbellard     } else {
42838df1cd07Sbellard         pd = p->phys_offset;
42848df1cd07Sbellard     }
42858df1cd07Sbellard 
42863a7d929eSbellard     if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
42878df1cd07Sbellard         io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
42888da3ff18Spbrook         if (p)
42898da3ff18Spbrook             addr = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
42908df1cd07Sbellard         io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
42918df1cd07Sbellard     } else {
42928df1cd07Sbellard         unsigned long addr1;
42938df1cd07Sbellard         addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
42948df1cd07Sbellard         /* RAM case */
42955579c7f3Spbrook         ptr = qemu_get_ram_ptr(addr1);
42968df1cd07Sbellard         stl_p(ptr, val);
42973a7d929eSbellard         if (!cpu_physical_memory_is_dirty(addr1)) {
42988df1cd07Sbellard             /* invalidate code */
42998df1cd07Sbellard             tb_invalidate_phys_page_range(addr1, addr1 + 4, 0);
43008df1cd07Sbellard             /* set dirty bit */
4301f7c11b53SYoshiaki Tamura             cpu_physical_memory_set_dirty_flags(addr1,
4302f7c11b53SYoshiaki Tamura                 (0xff & ~CODE_DIRTY_FLAG));
43038df1cd07Sbellard         }
43048df1cd07Sbellard     }
43053a7d929eSbellard }
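
/* Illustrative sketch (not from the original source; the device and
   flag are hypothetical): a device model posting a completion word into
   guest RAM.  Unlike the _notdirty variant, stl_phys() invalidates any
   translated code on the page and records the write in the dirty
   bitmap, which is what a DMA-style store needs. */
#if 0 /* example only */
static void example_post_completion(target_phys_addr_t status_addr)
{
    stl_phys(status_addr, 0x80000000u /* hypothetical DONE flag */);
}
#endif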
43068df1cd07Sbellard 
4307aab33094Sbellard /* XXX: optimize */
4308c227f099SAnthony Liguori void stb_phys(target_phys_addr_t addr, uint32_t val)
4309aab33094Sbellard {
4310aab33094Sbellard     uint8_t v = val;
4311aab33094Sbellard     cpu_physical_memory_write(addr, &v, 1);
4312aab33094Sbellard }
4313aab33094Sbellard 
4314733f0b02SMichael S. Tsirkin /* warning: addr must be aligned */
4315c227f099SAnthony Liguori void stw_phys(target_phys_addr_t addr, uint32_t val)
4316aab33094Sbellard {
4317733f0b02SMichael S. Tsirkin     int io_index;
4318733f0b02SMichael S. Tsirkin     uint8_t *ptr;
4319733f0b02SMichael S. Tsirkin     unsigned long pd;
4320733f0b02SMichael S. Tsirkin     PhysPageDesc *p;
4321733f0b02SMichael S. Tsirkin 
4322733f0b02SMichael S. Tsirkin     p = phys_page_find(addr >> TARGET_PAGE_BITS);
4323733f0b02SMichael S. Tsirkin     if (!p) {
4324733f0b02SMichael S. Tsirkin         pd = IO_MEM_UNASSIGNED;
4325733f0b02SMichael S. Tsirkin     } else {
4326733f0b02SMichael S. Tsirkin         pd = p->phys_offset;
4327733f0b02SMichael S. Tsirkin     }
4328733f0b02SMichael S. Tsirkin 
4329733f0b02SMichael S. Tsirkin     if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
4330733f0b02SMichael S. Tsirkin         io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
4331733f0b02SMichael S. Tsirkin         if (p)
4332733f0b02SMichael S. Tsirkin             addr = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
4333733f0b02SMichael S. Tsirkin         io_mem_write[io_index][1](io_mem_opaque[io_index], addr, val);
4334733f0b02SMichael S. Tsirkin     } else {
4335733f0b02SMichael S. Tsirkin         unsigned long addr1;
4336733f0b02SMichael S. Tsirkin         addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
4337733f0b02SMichael S. Tsirkin         /* RAM case */
4338733f0b02SMichael S. Tsirkin         ptr = qemu_get_ram_ptr(addr1);
4339733f0b02SMichael S. Tsirkin         stw_p(ptr, val);
4340733f0b02SMichael S. Tsirkin         if (!cpu_physical_memory_is_dirty(addr1)) {
4341733f0b02SMichael S. Tsirkin             /* invalidate code */
4342733f0b02SMichael S. Tsirkin             tb_invalidate_phys_page_range(addr1, addr1 + 2, 0);
4343733f0b02SMichael S. Tsirkin             /* set dirty bit */
4344733f0b02SMichael S. Tsirkin             cpu_physical_memory_set_dirty_flags(addr1,
4345733f0b02SMichael S. Tsirkin                 (0xff & ~CODE_DIRTY_FLAG));
4346733f0b02SMichael S. Tsirkin         }
4347733f0b02SMichael S. Tsirkin     }
4348aab33094Sbellard }
4349aab33094Sbellard 
4350aab33094Sbellard /* XXX: optimize */
4351c227f099SAnthony Liguori void stq_phys(target_phys_addr_t addr, uint64_t val)
4352aab33094Sbellard {
4353aab33094Sbellard     val = tswap64(val);
435471d2b725SStefan Weil     cpu_physical_memory_write(addr, &val, 8);
4355aab33094Sbellard }
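
/* Byte-order note for stq_phys() above: tswap64() swaps only when host
   and target endianness differ, so the buffer handed to
   cpu_physical_memory_write() is already in target byte order.  For
   example, stq_phys(A, 0x0102030405060708ULL) on a big-endian target
   stores bytes 01 02 ... 08 at ascending addresses regardless of the
   host's endianness. */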
4356aab33094Sbellard 
43575e2972fdSaliguori /* virtual memory access for debug (includes writing to ROM) */
4358b448f2f3Sbellard int cpu_memory_rw_debug(CPUState *env, target_ulong addr,
4359b448f2f3Sbellard                         uint8_t *buf, int len, int is_write)
436013eb76e0Sbellard {
436113eb76e0Sbellard     int l;
4362c227f099SAnthony Liguori     target_phys_addr_t phys_addr;
43639b3c35e0Sj_mayer     target_ulong page;
436413eb76e0Sbellard 
436513eb76e0Sbellard     while (len > 0) {
436613eb76e0Sbellard         page = addr & TARGET_PAGE_MASK;
436713eb76e0Sbellard         phys_addr = cpu_get_phys_page_debug(env, page);
436813eb76e0Sbellard         /* if no physical page mapped, return an error */
436913eb76e0Sbellard         if (phys_addr == -1)
437013eb76e0Sbellard             return -1;
437113eb76e0Sbellard         l = (page + TARGET_PAGE_SIZE) - addr;
437213eb76e0Sbellard         if (l > len)
437313eb76e0Sbellard             l = len;
43745e2972fdSaliguori         phys_addr += (addr & ~TARGET_PAGE_MASK);
43755e2972fdSaliguori         if (is_write)
43765e2972fdSaliguori             cpu_physical_memory_write_rom(phys_addr, buf, l);
43775e2972fdSaliguori         else
43785e2972fdSaliguori             cpu_physical_memory_rw(phys_addr, buf, l, is_write);
437913eb76e0Sbellard         len -= l;
438013eb76e0Sbellard         buf += l;
438113eb76e0Sbellard         addr += l;
438213eb76e0Sbellard     }
438313eb76e0Sbellard     return 0;
438413eb76e0Sbellard }
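
/* Illustrative sketch (not from the original source; the helper is
   hypothetical): the typical caller of cpu_memory_rw_debug() is a
   debugger stub peeking at guest virtual memory. */
#if 0 /* example only */
static int example_read_guest_u32(CPUState *env, target_ulong vaddr,
                                  uint32_t *out)
{
    /* returns 0 on success, -1 if any page in the range is unmapped */
    return cpu_memory_rw_debug(env, vaddr, (uint8_t *)out,
                               sizeof(*out), 0);
}
#endif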
4385a68fe89cSPaul Brook #endif
438613eb76e0Sbellard 
43872e70f6efSpbrook /* In deterministic execution mode, an instruction performing device
43882e70f6efSpbrook    I/O must be the last instruction in its TB */
43892e70f6efSpbrook void cpu_io_recompile(CPUState *env, void *retaddr)
43902e70f6efSpbrook {
43912e70f6efSpbrook     TranslationBlock *tb;
43922e70f6efSpbrook     uint32_t n, cflags;
43932e70f6efSpbrook     target_ulong pc, cs_base;
43942e70f6efSpbrook     uint64_t flags;
43952e70f6efSpbrook 
43962e70f6efSpbrook     tb = tb_find_pc((unsigned long)retaddr);
43972e70f6efSpbrook     if (!tb) {
43982e70f6efSpbrook         cpu_abort(env, "cpu_io_recompile: could not find TB for pc=%p",
43992e70f6efSpbrook                   retaddr);
44002e70f6efSpbrook     }
44012e70f6efSpbrook     n = env->icount_decr.u16.low + tb->icount;
4402618ba8e6SStefan Weil     cpu_restore_state(tb, env, (unsigned long)retaddr);
44032e70f6efSpbrook     /* Calculate how many instructions had been executed before the fault
4404bf20dc07Sths        occurred.  */
44052e70f6efSpbrook     n = n - env->icount_decr.u16.low;
44062e70f6efSpbrook     /* Generate a new TB ending on the I/O insn.  */
44072e70f6efSpbrook     n++;
44082e70f6efSpbrook     /* On MIPS and SH, delay slot instructions can only be restarted if
44092e70f6efSpbrook        they were already the first instruction in the TB.  If this is not
4410bf20dc07Sths        the first instruction in a TB then re-execute the preceding
44112e70f6efSpbrook        branch.  */
44122e70f6efSpbrook #if defined(TARGET_MIPS)
44132e70f6efSpbrook     if ((env->hflags & MIPS_HFLAG_BMASK) != 0 && n > 1) {
44142e70f6efSpbrook         env->active_tc.PC -= 4;
44152e70f6efSpbrook         env->icount_decr.u16.low++;
44162e70f6efSpbrook         env->hflags &= ~MIPS_HFLAG_BMASK;
44172e70f6efSpbrook     }
44182e70f6efSpbrook #elif defined(TARGET_SH4)
44192e70f6efSpbrook     if ((env->flags & ((DELAY_SLOT | DELAY_SLOT_CONDITIONAL))) != 0
44202e70f6efSpbrook             && n > 1) {
44212e70f6efSpbrook         env->pc -= 2;
44222e70f6efSpbrook         env->icount_decr.u16.low++;
44232e70f6efSpbrook         env->flags &= ~(DELAY_SLOT | DELAY_SLOT_CONDITIONAL);
44242e70f6efSpbrook     }
44252e70f6efSpbrook #endif
44262e70f6efSpbrook     /* This should never happen.  */
44272e70f6efSpbrook     if (n > CF_COUNT_MASK)
44282e70f6efSpbrook         cpu_abort(env, "TB too big during recompile");
44292e70f6efSpbrook 
44302e70f6efSpbrook     cflags = n | CF_LAST_IO;
44312e70f6efSpbrook     pc = tb->pc;
44322e70f6efSpbrook     cs_base = tb->cs_base;
44332e70f6efSpbrook     flags = tb->flags;
44342e70f6efSpbrook     tb_phys_invalidate(tb, -1);
44352e70f6efSpbrook     /* FIXME: In theory this could raise an exception.  In practice
44362e70f6efSpbrook        we have already translated the block once so it's probably ok.  */
44372e70f6efSpbrook     tb_gen_code(env, pc, cs_base, flags, cflags);
4438bf20dc07Sths     /* TODO: If env->pc != tb->pc (i.e. the faulting instruction was not
44392e70f6efSpbrook        the first in the TB) then we end up generating a whole new TB and
44402e70f6efSpbrook        repeating the fault, which is horribly inefficient.
44412e70f6efSpbrook        Better would be to execute just this insn uncached, or generate a
44422e70f6efSpbrook        second new TB.  */
44432e70f6efSpbrook     cpu_resume_from_signal(env, NULL);
44442e70f6efSpbrook }
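
/* Worked example of the icount arithmetic above (my reading of the
   bookkeeping; treat the concrete numbers as an assumption): suppose
   tb->icount == 10 and icount_decr.u16.low == 3 when the I/O insn
   traps, so n = 13 was the instruction budget at TB entry.  If
   cpu_restore_state() leaves low == 6, then n = 13 - 6 = 7 insns had
   completed, and n++ makes the retranslated TB 8 insns long, ending on
   the I/O insn as CF_LAST_IO requires. */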
44452e70f6efSpbrook 
4446b3755a91SPaul Brook #if !defined(CONFIG_USER_ONLY)
4447b3755a91SPaul Brook 
4448055403b2SStefan Weil void dump_exec_info(FILE *f, fprintf_function cpu_fprintf)
4449e3db7226Sbellard {
4450e3db7226Sbellard     int i, target_code_size, max_target_code_size;
4451e3db7226Sbellard     int direct_jmp_count, direct_jmp2_count, cross_page;
4452e3db7226Sbellard     TranslationBlock *tb;
4453e3db7226Sbellard 
4454e3db7226Sbellard     target_code_size = 0;
4455e3db7226Sbellard     max_target_code_size = 0;
4456e3db7226Sbellard     cross_page = 0;
4457e3db7226Sbellard     direct_jmp_count = 0;
4458e3db7226Sbellard     direct_jmp2_count = 0;
4459e3db7226Sbellard     for(i = 0; i < nb_tbs; i++) {
4460e3db7226Sbellard         tb = &tbs[i];
4461e3db7226Sbellard         target_code_size += tb->size;
4462e3db7226Sbellard         if (tb->size > max_target_code_size)
4463e3db7226Sbellard             max_target_code_size = tb->size;
4464e3db7226Sbellard         if (tb->page_addr[1] != -1)
4465e3db7226Sbellard             cross_page++;
4466e3db7226Sbellard         if (tb->tb_next_offset[0] != 0xffff) {
4467e3db7226Sbellard             direct_jmp_count++;
4468e3db7226Sbellard             if (tb->tb_next_offset[1] != 0xffff) {
4469e3db7226Sbellard                 direct_jmp2_count++;
4470e3db7226Sbellard             }
4471e3db7226Sbellard         }
4472e3db7226Sbellard     }
4473e3db7226Sbellard     /* XXX: avoid using doubles? */
447457fec1feSbellard     cpu_fprintf(f, "Translation buffer state:\n");
4475055403b2SStefan Weil     cpu_fprintf(f, "gen code size       %td/%ld\n",
447626a5f13bSbellard                 code_gen_ptr - code_gen_buffer, code_gen_buffer_max_size);
447726a5f13bSbellard     cpu_fprintf(f, "TB count            %d/%d\n",
447826a5f13bSbellard                 nb_tbs, code_gen_max_blocks);
4479e3db7226Sbellard     cpu_fprintf(f, "TB avg target size  %d max=%d bytes\n",
4480e3db7226Sbellard                 nb_tbs ? target_code_size / nb_tbs : 0,
4481e3db7226Sbellard                 max_target_code_size);
4482055403b2SStefan Weil     cpu_fprintf(f, "TB avg host size    %td bytes (expansion ratio: %0.1f)\n",
4483e3db7226Sbellard                 nb_tbs ? (code_gen_ptr - code_gen_buffer) / nb_tbs : 0,
4484e3db7226Sbellard                 target_code_size ? (double) (code_gen_ptr - code_gen_buffer) / target_code_size : 0);
4485e3db7226Sbellard     cpu_fprintf(f, "cross page TB count %d (%d%%)\n",
4486e3db7226Sbellard             cross_page,
4487e3db7226Sbellard             nb_tbs ? (cross_page * 100) / nb_tbs : 0);
4488e3db7226Sbellard     cpu_fprintf(f, "direct jump count   %d (%d%%) (2 jumps=%d %d%%)\n",
4489e3db7226Sbellard                 direct_jmp_count,
4490e3db7226Sbellard                 nb_tbs ? (direct_jmp_count * 100) / nb_tbs : 0,
4491e3db7226Sbellard                 direct_jmp2_count,
4492e3db7226Sbellard                 nb_tbs ? (direct_jmp2_count * 100) / nb_tbs : 0);
449357fec1feSbellard     cpu_fprintf(f, "\nStatistics:\n");
4494e3db7226Sbellard     cpu_fprintf(f, "TB flush count      %d\n", tb_flush_count);
4495e3db7226Sbellard     cpu_fprintf(f, "TB invalidate count %d\n", tb_phys_invalidate_count);
4496e3db7226Sbellard     cpu_fprintf(f, "TLB flush count     %d\n", tlb_flush_count);
4497b67d9a52Sbellard     tcg_dump_info(f, cpu_fprintf);
4498e3db7226Sbellard }
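
/* Usage note: this dump backs the monitor's "info jit" command.  The
   sample output below is illustrative, not captured:
       (qemu) info jit
       Translation buffer state:
       gen code size       1290840/33554432
       TB count            4096/262144
       TB avg target size  20 max=192 bytes
       ...
   Every percentage is computed against nb_tbs, with each division
   guarded against nb_tbs == 0. */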
4499e3db7226Sbellard 
450061382a50Sbellard #define MMUSUFFIX _cmmu
450161382a50Sbellard #define GETPC() NULL
450261382a50Sbellard #define env cpu_single_env
4503b769d8feSbellard #define SOFTMMU_CODE_ACCESS
450461382a50Sbellard 
450561382a50Sbellard #define SHIFT 0
450661382a50Sbellard #include "softmmu_template.h"
450761382a50Sbellard 
450861382a50Sbellard #define SHIFT 1
450961382a50Sbellard #include "softmmu_template.h"
451061382a50Sbellard 
451161382a50Sbellard #define SHIFT 2
451261382a50Sbellard #include "softmmu_template.h"
451361382a50Sbellard 
451461382a50Sbellard #define SHIFT 3
451561382a50Sbellard #include "softmmu_template.h"
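
/* Each inclusion of softmmu_template.h above instantiates the code-fetch
   load helpers for one access width of 1 << SHIFT bytes, under the _cmmu
   suffix (SHIFT 0..3 giving the 8/16/32/64-bit variants).  GETPC() is
   stubbed to NULL and env aliased to cpu_single_env because these
   helpers are invoked from the translator itself, where there is no
   generated-code return address. */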
451661382a50Sbellard 
451761382a50Sbellard #undef env
451861382a50Sbellard 
451961382a50Sbellard #endif
4520