xref: /qemu/system/physmem.c (revision 95c318f5e1f88d7e5bcc6deac17330fd4806a2d3)
154936004Sbellard /*
2fd6ce8f6Sbellard  *  virtual page mapping and translated block handling
354936004Sbellard  *
454936004Sbellard  *  Copyright (c) 2003 Fabrice Bellard
554936004Sbellard  *
654936004Sbellard  * This library is free software; you can redistribute it and/or
754936004Sbellard  * modify it under the terms of the GNU Lesser General Public
854936004Sbellard  * License as published by the Free Software Foundation; either
954936004Sbellard  * version 2 of the License, or (at your option) any later version.
1054936004Sbellard  *
1154936004Sbellard  * This library is distributed in the hope that it will be useful,
1254936004Sbellard  * but WITHOUT ANY WARRANTY; without even the implied warranty of
1354936004Sbellard  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
1454936004Sbellard  * Lesser General Public License for more details.
1554936004Sbellard  *
1654936004Sbellard  * You should have received a copy of the GNU Lesser General Public
178167ee88SBlue Swirl  * License along with this library; if not, see <http://www.gnu.org/licenses/>.
1854936004Sbellard  */
1967b915a5Sbellard #include "config.h"
20d5a8f07cSbellard #ifdef _WIN32
21d5a8f07cSbellard #include <windows.h>
22d5a8f07cSbellard #else
23a98d49b1Sbellard #include <sys/types.h>
24d5a8f07cSbellard #include <sys/mman.h>
25d5a8f07cSbellard #endif
2654936004Sbellard #include <stdlib.h>
2754936004Sbellard #include <stdio.h>
2854936004Sbellard #include <stdarg.h>
2954936004Sbellard #include <string.h>
3054936004Sbellard #include <errno.h>
3154936004Sbellard #include <unistd.h>
3254936004Sbellard #include <inttypes.h>
3354936004Sbellard 
346180a181Sbellard #include "cpu.h"
356180a181Sbellard #include "exec-all.h"
36ca10f867Saurel32 #include "qemu-common.h"
37b67d9a52Sbellard #include "tcg.h"
38b3c7724cSpbrook #include "hw/hw.h"
39cc9e98cbSAlex Williamson #include "hw/qdev.h"
4074576198Saliguori #include "osdep.h"
417ba1e619Saliguori #include "kvm.h"
4229e922b6SBlue Swirl #include "qemu-timer.h"
4353a5960aSpbrook #if defined(CONFIG_USER_ONLY)
4453a5960aSpbrook #include <qemu.h>
45fd052bf6SRiku Voipio #include <signal.h>
46f01576f1SJuergen Lock #if defined(__FreeBSD__) || defined(__FreeBSD_kernel__)
47f01576f1SJuergen Lock #include <sys/param.h>
48f01576f1SJuergen Lock #if __FreeBSD_version >= 700104
49f01576f1SJuergen Lock #define HAVE_KINFO_GETVMMAP
50f01576f1SJuergen Lock #define sigqueue sigqueue_freebsd  /* avoid redefinition */
51f01576f1SJuergen Lock #include <sys/time.h>
52f01576f1SJuergen Lock #include <sys/proc.h>
53f01576f1SJuergen Lock #include <machine/profile.h>
54f01576f1SJuergen Lock #define _KERNEL
55f01576f1SJuergen Lock #include <sys/user.h>
56f01576f1SJuergen Lock #undef _KERNEL
57f01576f1SJuergen Lock #undef sigqueue
58f01576f1SJuergen Lock #include <libutil.h>
59f01576f1SJuergen Lock #endif
60f01576f1SJuergen Lock #endif
6153a5960aSpbrook #endif
6254936004Sbellard 
63fd6ce8f6Sbellard //#define DEBUG_TB_INVALIDATE
6466e85a21Sbellard //#define DEBUG_FLUSH
659fa3e853Sbellard //#define DEBUG_TLB
6667d3b957Spbrook //#define DEBUG_UNASSIGNED
67fd6ce8f6Sbellard 
68fd6ce8f6Sbellard /* make various TB consistency checks */
69fd6ce8f6Sbellard //#define DEBUG_TB_CHECK
7098857888Sbellard //#define DEBUG_TLB_CHECK
71fd6ce8f6Sbellard 
721196be37Sths //#define DEBUG_IOPORT
73db7b5426Sblueswir1 //#define DEBUG_SUBPAGE
741196be37Sths 
7599773bd4Spbrook #if !defined(CONFIG_USER_ONLY)
7699773bd4Spbrook /* TB consistency checks only implemented for usermode emulation.  */
7799773bd4Spbrook #undef DEBUG_TB_CHECK
7899773bd4Spbrook #endif
7999773bd4Spbrook 
809fa3e853Sbellard #define SMC_BITMAP_USE_THRESHOLD 10
819fa3e853Sbellard 
82bdaf78e0Sblueswir1 static TranslationBlock *tbs;
8324ab68acSStefan Weil static int code_gen_max_blocks;
849fa3e853Sbellard TranslationBlock *tb_phys_hash[CODE_GEN_PHYS_HASH_SIZE];
85bdaf78e0Sblueswir1 static int nb_tbs;
86eb51d102Sbellard /* any access to the tbs or the page table must use this lock */
87c227f099SAnthony Liguori spinlock_t tb_lock = SPIN_LOCK_UNLOCKED;
88fd6ce8f6Sbellard 
89141ac468Sblueswir1 #if defined(__arm__) || defined(__sparc_v9__)
90141ac468Sblueswir1 /* The prologue must be reachable with a direct jump. ARM and Sparc64
91141ac468Sblueswir1  have limited branch ranges (possibly also PPC) so place it in a
92d03d860bSblueswir1  section close to code segment. */
93d03d860bSblueswir1 #define code_gen_section                                \
94d03d860bSblueswir1     __attribute__((__section__(".gen_code")))           \
95d03d860bSblueswir1     __attribute__((aligned (32)))
96f8e2af11SStefan Weil #elif defined(_WIN32)
97f8e2af11SStefan Weil /* Maximum alignment for Win32 is 16. */
98f8e2af11SStefan Weil #define code_gen_section                                \
99f8e2af11SStefan Weil     __attribute__((aligned (16)))
100d03d860bSblueswir1 #else
101d03d860bSblueswir1 #define code_gen_section                                \
102d03d860bSblueswir1     __attribute__((aligned (32)))
103d03d860bSblueswir1 #endif
104d03d860bSblueswir1 
105d03d860bSblueswir1 uint8_t code_gen_prologue[1024] code_gen_section;
106bdaf78e0Sblueswir1 static uint8_t *code_gen_buffer;
107bdaf78e0Sblueswir1 static unsigned long code_gen_buffer_size;
10826a5f13bSbellard /* threshold to flush the translated code buffer */
109bdaf78e0Sblueswir1 static unsigned long code_gen_buffer_max_size;
11024ab68acSStefan Weil static uint8_t *code_gen_ptr;
111fd6ce8f6Sbellard 
112e2eef170Spbrook #if !defined(CONFIG_USER_ONLY)
1139fa3e853Sbellard int phys_ram_fd;
11474576198Saliguori static int in_migration;
11594a6b54fSpbrook 
116f471a17eSAlex Williamson RAMList ram_list = { .blocks = QLIST_HEAD_INITIALIZER(ram_list) };
117e2eef170Spbrook #endif
1189fa3e853Sbellard 
1196a00d601Sbellard CPUState *first_cpu;
1206a00d601Sbellard /* current CPU in the current thread. It is only valid inside
1216a00d601Sbellard    cpu_exec() */
1226a00d601Sbellard CPUState *cpu_single_env;
1232e70f6efSpbrook /* 0 = Do not count executed instructions.
124bf20dc07Sths    1 = Precise instruction counting.
1252e70f6efSpbrook    2 = Adaptive rate instruction counting.  */
1262e70f6efSpbrook int use_icount = 0;
1272e70f6efSpbrook /* Current instruction counter.  While executing translated code this may
1282e70f6efSpbrook    include some instructions that have not yet been executed.  */
1292e70f6efSpbrook int64_t qemu_icount;
1306a00d601Sbellard 
13154936004Sbellard typedef struct PageDesc {
13292e873b9Sbellard     /* list of TBs intersecting this ram page */
133fd6ce8f6Sbellard     TranslationBlock *first_tb;
1349fa3e853Sbellard     /* in order to optimize self modifying code, we count the number
1359fa3e853Sbellard        of lookups we do to a given page to use a bitmap */
1369fa3e853Sbellard     unsigned int code_write_count;
1379fa3e853Sbellard     uint8_t *code_bitmap;
1389fa3e853Sbellard #if defined(CONFIG_USER_ONLY)
1399fa3e853Sbellard     unsigned long flags;
1409fa3e853Sbellard #endif
14154936004Sbellard } PageDesc;
14254936004Sbellard 
14341c1b1c9SPaul Brook /* In system mode we want L1_MAP to be based on ram offsets,
1445cd2c5b6SRichard Henderson    while in user mode we want it to be based on virtual addresses.  */
1455cd2c5b6SRichard Henderson #if !defined(CONFIG_USER_ONLY)
14641c1b1c9SPaul Brook #if HOST_LONG_BITS < TARGET_PHYS_ADDR_SPACE_BITS
14741c1b1c9SPaul Brook # define L1_MAP_ADDR_SPACE_BITS  HOST_LONG_BITS
14841c1b1c9SPaul Brook #else
1495cd2c5b6SRichard Henderson # define L1_MAP_ADDR_SPACE_BITS  TARGET_PHYS_ADDR_SPACE_BITS
15041c1b1c9SPaul Brook #endif
151bedb69eaSj_mayer #else
1525cd2c5b6SRichard Henderson # define L1_MAP_ADDR_SPACE_BITS  TARGET_VIRT_ADDR_SPACE_BITS
153bedb69eaSj_mayer #endif
15454936004Sbellard 
1555cd2c5b6SRichard Henderson /* Size of the L2 (and L3, etc) page tables.  */
1565cd2c5b6SRichard Henderson #define L2_BITS 10
15754936004Sbellard #define L2_SIZE (1 << L2_BITS)
15854936004Sbellard 
1595cd2c5b6SRichard Henderson /* The bits remaining after N lower levels of page tables.  */
1605cd2c5b6SRichard Henderson #define P_L1_BITS_REM \
1615cd2c5b6SRichard Henderson     ((TARGET_PHYS_ADDR_SPACE_BITS - TARGET_PAGE_BITS) % L2_BITS)
1625cd2c5b6SRichard Henderson #define V_L1_BITS_REM \
1635cd2c5b6SRichard Henderson     ((L1_MAP_ADDR_SPACE_BITS - TARGET_PAGE_BITS) % L2_BITS)
1645cd2c5b6SRichard Henderson 
1655cd2c5b6SRichard Henderson /* Size of the L1 page table.  Avoid silly small sizes.  */
1665cd2c5b6SRichard Henderson #if P_L1_BITS_REM < 4
1675cd2c5b6SRichard Henderson #define P_L1_BITS  (P_L1_BITS_REM + L2_BITS)
1685cd2c5b6SRichard Henderson #else
1695cd2c5b6SRichard Henderson #define P_L1_BITS  P_L1_BITS_REM
1705cd2c5b6SRichard Henderson #endif
1715cd2c5b6SRichard Henderson 
1725cd2c5b6SRichard Henderson #if V_L1_BITS_REM < 4
1735cd2c5b6SRichard Henderson #define V_L1_BITS  (V_L1_BITS_REM + L2_BITS)
1745cd2c5b6SRichard Henderson #else
1755cd2c5b6SRichard Henderson #define V_L1_BITS  V_L1_BITS_REM
1765cd2c5b6SRichard Henderson #endif
1775cd2c5b6SRichard Henderson 
1785cd2c5b6SRichard Henderson #define P_L1_SIZE  ((target_phys_addr_t)1 << P_L1_BITS)
1795cd2c5b6SRichard Henderson #define V_L1_SIZE  ((target_ulong)1 << V_L1_BITS)
1805cd2c5b6SRichard Henderson 
1815cd2c5b6SRichard Henderson #define P_L1_SHIFT (TARGET_PHYS_ADDR_SPACE_BITS - TARGET_PAGE_BITS - P_L1_BITS)
1825cd2c5b6SRichard Henderson #define V_L1_SHIFT (L1_MAP_ADDR_SPACE_BITS - TARGET_PAGE_BITS - V_L1_BITS)
1835cd2c5b6SRichard Henderson 
18483fb7adfSbellard unsigned long qemu_real_host_page_size;
18583fb7adfSbellard unsigned long qemu_host_page_bits;
18683fb7adfSbellard unsigned long qemu_host_page_size;
18783fb7adfSbellard unsigned long qemu_host_page_mask;
18854936004Sbellard 
1895cd2c5b6SRichard Henderson /* This is a multi-level map on the virtual address space.
1905cd2c5b6SRichard Henderson    The bottom level has pointers to PageDesc.  */
1915cd2c5b6SRichard Henderson static void *l1_map[V_L1_SIZE];
19254936004Sbellard 
193e2eef170Spbrook #if !defined(CONFIG_USER_ONLY)
19441c1b1c9SPaul Brook typedef struct PhysPageDesc {
19541c1b1c9SPaul Brook     /* offset in host memory of the page + io_index in the low bits */
19641c1b1c9SPaul Brook     ram_addr_t phys_offset;
19741c1b1c9SPaul Brook     ram_addr_t region_offset;
19841c1b1c9SPaul Brook } PhysPageDesc;
19941c1b1c9SPaul Brook 
2005cd2c5b6SRichard Henderson /* This is a multi-level map on the physical address space.
2015cd2c5b6SRichard Henderson    The bottom level has pointers to PhysPageDesc.  */
2025cd2c5b6SRichard Henderson static void *l1_phys_map[P_L1_SIZE];
2036d9a1304SPaul Brook 
204e2eef170Spbrook static void io_mem_init(void);
205e2eef170Spbrook 
20633417e70Sbellard /* io memory support */
20733417e70Sbellard CPUWriteMemoryFunc *io_mem_write[IO_MEM_NB_ENTRIES][4];
20833417e70Sbellard CPUReadMemoryFunc *io_mem_read[IO_MEM_NB_ENTRIES][4];
209a4193c8aSbellard void *io_mem_opaque[IO_MEM_NB_ENTRIES];
210511d2b14Sblueswir1 static char io_mem_used[IO_MEM_NB_ENTRIES];
2116658ffb8Spbrook static int io_mem_watch;
2126658ffb8Spbrook #endif
21333417e70Sbellard 
21434865134Sbellard /* log support */
2151e8b27caSJuha Riihimäki #ifdef WIN32
2161e8b27caSJuha Riihimäki static const char *logfilename = "qemu.log";
2171e8b27caSJuha Riihimäki #else
218d9b630fdSblueswir1 static const char *logfilename = "/tmp/qemu.log";
2191e8b27caSJuha Riihimäki #endif
22034865134Sbellard FILE *logfile;
22134865134Sbellard int loglevel;
222e735b91cSpbrook static int log_append = 0;
22334865134Sbellard 
224e3db7226Sbellard /* statistics */
225b3755a91SPaul Brook #if !defined(CONFIG_USER_ONLY)
226e3db7226Sbellard static int tlb_flush_count;
227b3755a91SPaul Brook #endif
228e3db7226Sbellard static int tb_flush_count;
229e3db7226Sbellard static int tb_phys_invalidate_count;
230e3db7226Sbellard 
2317cb69caeSbellard #ifdef _WIN32
/* Mark a host memory range as executable (and read/write) so it can hold
 * generated code.  Win32 variant.
 * NOTE(review): the VirtualProtect() result is ignored -- on failure the
 * range stays non-executable and the first execution from it will fault.
 */
static void map_exec(void *addr, long size)
{
    DWORD old_protect;
    VirtualProtect(addr, size,
                   PAGE_EXECUTE_READWRITE, &old_protect);

}
2397cb69caeSbellard #else
2407cb69caeSbellard static void map_exec(void *addr, long size)
2417cb69caeSbellard {
2424369415fSbellard     unsigned long start, end, page_size;
2437cb69caeSbellard 
2444369415fSbellard     page_size = getpagesize();
2457cb69caeSbellard     start = (unsigned long)addr;
2464369415fSbellard     start &= ~(page_size - 1);
2477cb69caeSbellard 
2487cb69caeSbellard     end = (unsigned long)addr + size;
2494369415fSbellard     end += page_size - 1;
2504369415fSbellard     end &= ~(page_size - 1);
2517cb69caeSbellard 
2527cb69caeSbellard     mprotect((void *)start, end - start,
2537cb69caeSbellard              PROT_READ | PROT_WRITE | PROT_EXEC);
2547cb69caeSbellard }
2557cb69caeSbellard #endif
2567cb69caeSbellard 
/* One-time host/target page geometry initialization.  In BSD user-mode
 * emulation, additionally walk the host's existing mappings and mark
 * them PAGE_RESERVED so the guest cannot mmap over them.
 */
static void page_init(void)
{
    /* NOTE: we can always suppose that qemu_host_page_size >=
       TARGET_PAGE_SIZE */
#ifdef _WIN32
    {
        SYSTEM_INFO system_info;

        GetSystemInfo(&system_info);
        qemu_real_host_page_size = system_info.dwPageSize;
    }
#else
    qemu_real_host_page_size = getpagesize();
#endif
    /* Clamp qemu_host_page_size (possibly preset elsewhere) so it is at
       least both the real host page size and the target page size. */
    if (qemu_host_page_size == 0)
        qemu_host_page_size = qemu_real_host_page_size;
    if (qemu_host_page_size < TARGET_PAGE_SIZE)
        qemu_host_page_size = TARGET_PAGE_SIZE;
    /* Derive log2 and mask from the final page size. */
    qemu_host_page_bits = 0;
    while ((1 << qemu_host_page_bits) < qemu_host_page_size)
        qemu_host_page_bits++;
    qemu_host_page_mask = ~(qemu_host_page_size - 1);

#if defined(CONFIG_BSD) && defined(CONFIG_USER_ONLY)
    {
#ifdef HAVE_KINFO_GETVMMAP
        /* FreeBSD >= 7.0: enumerate existing host mappings via libutil. */
        struct kinfo_vmentry *freep;
        int i, cnt;

        freep = kinfo_getvmmap(getpid(), &cnt);
        if (freep) {
            mmap_lock();
            for (i = 0; i < cnt; i++) {
                unsigned long startaddr, endaddr;

                startaddr = freep[i].kve_start;
                endaddr = freep[i].kve_end;
                if (h2g_valid(startaddr)) {
                    startaddr = h2g(startaddr) & TARGET_PAGE_MASK;

                    if (h2g_valid(endaddr)) {
                        endaddr = h2g(endaddr);
                        page_set_flags(startaddr, endaddr, PAGE_RESERVED);
                    } else {
#if TARGET_ABI_BITS <= L1_MAP_ADDR_SPACE_BITS
                        /* Mapping extends past the guest address space:
                           reserve everything up to the top. */
                        endaddr = ~0ul;
                        page_set_flags(startaddr, endaddr, PAGE_RESERVED);
#endif
                    }
                }
            }
            free(freep);
            mmap_unlock();
        }
#else
        /* Fallback: parse the Linux-compat maps file line by line. */
        FILE *f;

        last_brk = (unsigned long)sbrk(0);

        f = fopen("/compat/linux/proc/self/maps", "r");
        if (f) {
            mmap_lock();

            do {
                unsigned long startaddr, endaddr;
                int n;

                /* NOTE(review): if a line fails to match, fscanf may not
                   consume input and feof may never be reached; this relies
                   on the maps file being well formed -- confirm. */
                n = fscanf (f, "%lx-%lx %*[^\n]\n", &startaddr, &endaddr);

                if (n == 2 && h2g_valid(startaddr)) {
                    startaddr = h2g(startaddr) & TARGET_PAGE_MASK;

                    if (h2g_valid(endaddr)) {
                        endaddr = h2g(endaddr);
                    } else {
                        endaddr = ~0ul;
                    }
                    page_set_flags(startaddr, endaddr, PAGE_RESERVED);
                }
            } while (!feof(f));

            fclose(f);
            mmap_unlock();
        }
#endif
    }
#endif
}
34554936004Sbellard 
/* Look up the PageDesc for a guest page in the multi-level l1_map radix
 * tree, optionally allocating the missing intermediate tables and the
 * leaf PageDesc array on the way down.
 *
 * index: page number (target address >> TARGET_PAGE_BITS).
 * alloc: if non-zero, allocate missing levels; otherwise return NULL
 *        as soon as any level is absent.
 */
static PageDesc *page_find_alloc(tb_page_addr_t index, int alloc)
{
    PageDesc *pd;
    void **lp;
    int i;

#if defined(CONFIG_USER_ONLY)
    /* We can't use qemu_malloc because it may recurse into a locked mutex. */
# define ALLOC(P, SIZE)                                 \
    do {                                                \
        P = mmap(NULL, SIZE, PROT_READ | PROT_WRITE,    \
                 MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);   \
    } while (0)
#else
# define ALLOC(P, SIZE) \
    do { P = qemu_mallocz(SIZE); } while (0)
#endif

    /* Level 1.  Always allocated.  */
    lp = l1_map + ((index >> V_L1_SHIFT) & (V_L1_SIZE - 1));

    /* Level 2..N-1.  */
    for (i = V_L1_SHIFT / L2_BITS - 1; i > 0; i--) {
        void **p = *lp;

        if (p == NULL) {
            if (!alloc) {
                return NULL;
            }
            ALLOC(p, sizeof(void *) * L2_SIZE);
            *lp = p;
        }

        lp = p + ((index >> (i * L2_BITS)) & (L2_SIZE - 1));
    }

    /* Last level: the leaf array of PageDesc entries. */
    pd = *lp;
    if (pd == NULL) {
        if (!alloc) {
            return NULL;
        }
        ALLOC(pd, sizeof(PageDesc) * L2_SIZE);
        *lp = pd;
    }

#undef ALLOC

    return pd + (index & (L2_SIZE - 1));
}
39554936004Sbellard 
39641c1b1c9SPaul Brook static inline PageDesc *page_find(tb_page_addr_t index)
39754936004Sbellard {
3985cd2c5b6SRichard Henderson     return page_find_alloc(index, 0);
39954936004Sbellard }
40054936004Sbellard 
4016d9a1304SPaul Brook #if !defined(CONFIG_USER_ONLY)
402c227f099SAnthony Liguori static PhysPageDesc *phys_page_find_alloc(target_phys_addr_t index, int alloc)
40392e873b9Sbellard {
404e3f4e2a4Spbrook     PhysPageDesc *pd;
4055cd2c5b6SRichard Henderson     void **lp;
406e3f4e2a4Spbrook     int i;
4075cd2c5b6SRichard Henderson 
4085cd2c5b6SRichard Henderson     /* Level 1.  Always allocated.  */
4095cd2c5b6SRichard Henderson     lp = l1_phys_map + ((index >> P_L1_SHIFT) & (P_L1_SIZE - 1));
4105cd2c5b6SRichard Henderson 
4115cd2c5b6SRichard Henderson     /* Level 2..N-1.  */
4125cd2c5b6SRichard Henderson     for (i = P_L1_SHIFT / L2_BITS - 1; i > 0; i--) {
4135cd2c5b6SRichard Henderson         void **p = *lp;
4145cd2c5b6SRichard Henderson         if (p == NULL) {
4155cd2c5b6SRichard Henderson             if (!alloc) {
416108c49b8Sbellard                 return NULL;
4175cd2c5b6SRichard Henderson             }
4185cd2c5b6SRichard Henderson             *lp = p = qemu_mallocz(sizeof(void *) * L2_SIZE);
4195cd2c5b6SRichard Henderson         }
4205cd2c5b6SRichard Henderson         lp = p + ((index >> (i * L2_BITS)) & (L2_SIZE - 1));
4215cd2c5b6SRichard Henderson     }
4225cd2c5b6SRichard Henderson 
4235cd2c5b6SRichard Henderson     pd = *lp;
4245cd2c5b6SRichard Henderson     if (pd == NULL) {
4255cd2c5b6SRichard Henderson         int i;
4265cd2c5b6SRichard Henderson 
4275cd2c5b6SRichard Henderson         if (!alloc) {
4285cd2c5b6SRichard Henderson             return NULL;
4295cd2c5b6SRichard Henderson         }
4305cd2c5b6SRichard Henderson 
4315cd2c5b6SRichard Henderson         *lp = pd = qemu_malloc(sizeof(PhysPageDesc) * L2_SIZE);
4325cd2c5b6SRichard Henderson 
43367c4d23cSpbrook         for (i = 0; i < L2_SIZE; i++) {
434e3f4e2a4Spbrook             pd[i].phys_offset = IO_MEM_UNASSIGNED;
43567c4d23cSpbrook             pd[i].region_offset = (index + i) << TARGET_PAGE_BITS;
43667c4d23cSpbrook         }
43792e873b9Sbellard     }
4385cd2c5b6SRichard Henderson 
4395cd2c5b6SRichard Henderson     return pd + (index & (L2_SIZE - 1));
44092e873b9Sbellard }
44192e873b9Sbellard 
442c227f099SAnthony Liguori static inline PhysPageDesc *phys_page_find(target_phys_addr_t index)
44392e873b9Sbellard {
444108c49b8Sbellard     return phys_page_find_alloc(index, 0);
44592e873b9Sbellard }
44692e873b9Sbellard 
447c227f099SAnthony Liguori static void tlb_protect_code(ram_addr_t ram_addr);
448c227f099SAnthony Liguori static void tlb_unprotect_code_phys(CPUState *env, ram_addr_t ram_addr,
4493a7d929eSbellard                                     target_ulong vaddr);
450c8a706feSpbrook #define mmap_lock() do { } while(0)
451c8a706feSpbrook #define mmap_unlock() do { } while(0)
4529fa3e853Sbellard #endif
453fd6ce8f6Sbellard 
4544369415fSbellard #define DEFAULT_CODE_GEN_BUFFER_SIZE (32 * 1024 * 1024)
4554369415fSbellard 
4564369415fSbellard #if defined(CONFIG_USER_ONLY)
457ccbb4d44SStuart Brady /* Currently it is not recommended to allocate big chunks of data in
4584369415fSbellard    user mode. It will change when a dedicated libc will be used */
4594369415fSbellard #define USE_STATIC_CODE_GEN_BUFFER
4604369415fSbellard #endif
4614369415fSbellard 
4624369415fSbellard #ifdef USE_STATIC_CODE_GEN_BUFFER
463ebf50fb3SAurelien Jarno static uint8_t static_code_gen_buffer[DEFAULT_CODE_GEN_BUFFER_SIZE]
464ebf50fb3SAurelien Jarno                __attribute__((aligned (CODE_GEN_ALIGN)));
4654369415fSbellard #endif
4664369415fSbellard 
/* Allocate the translated-code buffer and the TB descriptor array.
 *
 * tb_size: requested buffer size in bytes; 0 selects a default
 * (DEFAULT_CODE_GEN_BUFFER_SIZE in user mode, ram_size/4 in system
 * mode), clamped to at least MIN_CODE_GEN_BUFFER_SIZE.
 *
 * On several hosts the buffer must sit in a particular address range so
 * that generated code can reach the prologue/helpers with direct
 * branches; the per-arch mmap flags below enforce that.
 */
static void code_gen_alloc(unsigned long tb_size)
{
#ifdef USE_STATIC_CODE_GEN_BUFFER
    /* User mode: reuse the statically allocated buffer. */
    code_gen_buffer = static_code_gen_buffer;
    code_gen_buffer_size = DEFAULT_CODE_GEN_BUFFER_SIZE;
    map_exec(code_gen_buffer, code_gen_buffer_size);
#else
    code_gen_buffer_size = tb_size;
    if (code_gen_buffer_size == 0) {
#if defined(CONFIG_USER_ONLY)
        /* in user mode, phys_ram_size is not meaningful */
        code_gen_buffer_size = DEFAULT_CODE_GEN_BUFFER_SIZE;
#else
        /* XXX: needs adjustments */
        code_gen_buffer_size = (unsigned long)(ram_size / 4);
#endif
    }
    if (code_gen_buffer_size < MIN_CODE_GEN_BUFFER_SIZE)
        code_gen_buffer_size = MIN_CODE_GEN_BUFFER_SIZE;
    /* The code gen buffer location may have constraints depending on
       the host cpu and OS */
#if defined(__linux__)
    {
        int flags;
        void *start = NULL;

        flags = MAP_PRIVATE | MAP_ANONYMOUS;
#if defined(__x86_64__)
        /* Keep the buffer in the low 4GB so 32-bit displacements work. */
        flags |= MAP_32BIT;
        /* Cannot map more than that */
        if (code_gen_buffer_size > (800 * 1024 * 1024))
            code_gen_buffer_size = (800 * 1024 * 1024);
#elif defined(__sparc_v9__)
        // Map the buffer below 2G, so we can use direct calls and branches
        flags |= MAP_FIXED;
        start = (void *) 0x60000000UL;
        if (code_gen_buffer_size > (512 * 1024 * 1024))
            code_gen_buffer_size = (512 * 1024 * 1024);
#elif defined(__arm__)
        /* Map the buffer below 32M, so we can use direct calls and branches */
        flags |= MAP_FIXED;
        start = (void *) 0x01000000UL;
        if (code_gen_buffer_size > 16 * 1024 * 1024)
            code_gen_buffer_size = 16 * 1024 * 1024;
#elif defined(__s390x__)
        /* Map the buffer so that we can use direct calls and branches.  */
        /* We have a +- 4GB range on the branches; leave some slop.  */
        if (code_gen_buffer_size > (3ul * 1024 * 1024 * 1024)) {
            code_gen_buffer_size = 3ul * 1024 * 1024 * 1024;
        }
        start = (void *)0x90000000UL;
#endif
        code_gen_buffer = mmap(start, code_gen_buffer_size,
                               PROT_WRITE | PROT_READ | PROT_EXEC,
                               flags, -1, 0);
        if (code_gen_buffer == MAP_FAILED) {
            fprintf(stderr, "Could not allocate dynamic translator buffer\n");
            exit(1);
        }
    }
#elif defined(__FreeBSD__) || defined(__FreeBSD_kernel__) || defined(__DragonFly__)
    {
        int flags;
        void *addr = NULL;
        flags = MAP_PRIVATE | MAP_ANONYMOUS;
#if defined(__x86_64__)
        /* FreeBSD doesn't have MAP_32BIT, use MAP_FIXED and assume
         * 0x40000000 is free */
        flags |= MAP_FIXED;
        addr = (void *)0x40000000;
        /* Cannot map more than that */
        if (code_gen_buffer_size > (800 * 1024 * 1024))
            code_gen_buffer_size = (800 * 1024 * 1024);
#endif
        code_gen_buffer = mmap(addr, code_gen_buffer_size,
                               PROT_WRITE | PROT_READ | PROT_EXEC,
                               flags, -1, 0);
        if (code_gen_buffer == MAP_FAILED) {
            fprintf(stderr, "Could not allocate dynamic translator buffer\n");
            exit(1);
        }
    }
#else
    /* Generic fallback: heap-allocate, then flip the pages executable. */
    code_gen_buffer = qemu_malloc(code_gen_buffer_size);
    map_exec(code_gen_buffer, code_gen_buffer_size);
#endif
#endif /* !USE_STATIC_CODE_GEN_BUFFER */
    map_exec(code_gen_prologue, sizeof(code_gen_prologue));
    /* Leave slack at the end of the buffer for one maximum-size
       translation-op sequence. */
    code_gen_buffer_max_size = code_gen_buffer_size -
        (TCG_MAX_OP_SIZE * OPC_MAX_SIZE);
    code_gen_max_blocks = code_gen_buffer_size / CODE_GEN_AVG_BLOCK_SIZE;
    tbs = qemu_malloc(code_gen_max_blocks * sizeof(TranslationBlock));
}
56026a5f13bSbellard 
/* Must be called before using the QEMU cpus. 'tb_size' is the size
   (in bytes) allocated to the translation buffer. Zero means default
   size. */
void cpu_exec_init_all(unsigned long tb_size)
{
    cpu_gen_init();
    code_gen_alloc(tb_size);
    code_gen_ptr = code_gen_buffer;    /* next TB is emitted at the start */
    page_init();
#if !defined(CONFIG_USER_ONLY)
    io_mem_init();
#endif
#if !defined(CONFIG_USER_ONLY) || !defined(CONFIG_USE_GUEST_BASE)
    /* There's no guest base to take into account, so go ahead and
       initialize the prologue now.  */
    tcg_prologue_init(&tcg_ctx);
#endif
}
57926a5f13bSbellard 
5809656f324Spbrook #if defined(CPU_SAVE_VERSION) && !defined(CONFIG_USER_ONLY)
5819656f324Spbrook 
/* vmstate post-load hook for the common CPU fields: sanitize bits whose
 * meaning changed between versions and drop state that is stale after
 * migration.  Always returns 0 (success).
 */
static int cpu_common_post_load(void *opaque, int version_id)
{
    CPUState *env = opaque;

    /* 0x01 was CPU_INTERRUPT_EXIT. This line can be removed when the
       version_id is increased. */
    env->interrupt_request &= ~0x01;
    /* TLB contents carried over from the source machine are meaningless
       on this host; flush everything. */
    tlb_flush(env, 1);

    return 0;
}
593e7f4eff7SJuan Quintela 
/* Migration description of the architecture-independent CPU fields
   (halted flag and pending interrupt mask). */
static const VMStateDescription vmstate_cpu_common = {
    .name = "cpu_common",
    .version_id = 1,
    .minimum_version_id = 1,
    .minimum_version_id_old = 1,
    .post_load = cpu_common_post_load,
    .fields      = (VMStateField []) {
        VMSTATE_UINT32(halted, CPUState),
        VMSTATE_UINT32(interrupt_request, CPUState),
        VMSTATE_END_OF_LIST()
    }
};
6069656f324Spbrook #endif
6079656f324Spbrook 
608950f1472SGlauber Costa CPUState *qemu_get_cpu(int cpu)
609950f1472SGlauber Costa {
610950f1472SGlauber Costa     CPUState *env = first_cpu;
611950f1472SGlauber Costa 
612950f1472SGlauber Costa     while (env) {
613950f1472SGlauber Costa         if (env->cpu_index == cpu)
614950f1472SGlauber Costa             break;
615950f1472SGlauber Costa         env = env->next_cpu;
616950f1472SGlauber Costa     }
617950f1472SGlauber Costa 
618950f1472SGlauber Costa     return env;
619950f1472SGlauber Costa }
620950f1472SGlauber Costa 
6216a00d601Sbellard void cpu_exec_init(CPUState *env)
622fd6ce8f6Sbellard {
6236a00d601Sbellard     CPUState **penv;
6246a00d601Sbellard     int cpu_index;
6256a00d601Sbellard 
626c2764719Spbrook #if defined(CONFIG_USER_ONLY)
627c2764719Spbrook     cpu_list_lock();
628c2764719Spbrook #endif
6296a00d601Sbellard     env->next_cpu = NULL;
6306a00d601Sbellard     penv = &first_cpu;
6316a00d601Sbellard     cpu_index = 0;
6326a00d601Sbellard     while (*penv != NULL) {
6331e9fa730SNathan Froyd         penv = &(*penv)->next_cpu;
6346a00d601Sbellard         cpu_index++;
6356a00d601Sbellard     }
6366a00d601Sbellard     env->cpu_index = cpu_index;
637268a362cSaliguori     env->numa_node = 0;
63872cf2d4fSBlue Swirl     QTAILQ_INIT(&env->breakpoints);
63972cf2d4fSBlue Swirl     QTAILQ_INIT(&env->watchpoints);
6406a00d601Sbellard     *penv = env;
641c2764719Spbrook #if defined(CONFIG_USER_ONLY)
642c2764719Spbrook     cpu_list_unlock();
643c2764719Spbrook #endif
644b3c7724cSpbrook #if defined(CPU_SAVE_VERSION) && !defined(CONFIG_USER_ONLY)
6450be71e32SAlex Williamson     vmstate_register(NULL, cpu_index, &vmstate_cpu_common, env);
6460be71e32SAlex Williamson     register_savevm(NULL, "cpu", cpu_index, CPU_SAVE_VERSION,
647b3c7724cSpbrook                     cpu_save, cpu_load, env);
648b3c7724cSpbrook #endif
649fd6ce8f6Sbellard }
650fd6ce8f6Sbellard 
6519fa3e853Sbellard static inline void invalidate_page_bitmap(PageDesc *p)
6529fa3e853Sbellard {
6539fa3e853Sbellard     if (p->code_bitmap) {
65459817ccbSbellard         qemu_free(p->code_bitmap);
6559fa3e853Sbellard         p->code_bitmap = NULL;
6569fa3e853Sbellard     }
6579fa3e853Sbellard     p->code_write_count = 0;
6589fa3e853Sbellard }
6599fa3e853Sbellard 
6605cd2c5b6SRichard Henderson /* Set to NULL all the 'first_tb' fields in all PageDescs. */
6615cd2c5b6SRichard Henderson 
6625cd2c5b6SRichard Henderson static void page_flush_tb_1 (int level, void **lp)
6635cd2c5b6SRichard Henderson {
6645cd2c5b6SRichard Henderson     int i;
6655cd2c5b6SRichard Henderson 
6665cd2c5b6SRichard Henderson     if (*lp == NULL) {
6675cd2c5b6SRichard Henderson         return;
6685cd2c5b6SRichard Henderson     }
6695cd2c5b6SRichard Henderson     if (level == 0) {
6705cd2c5b6SRichard Henderson         PageDesc *pd = *lp;
6717296abacSPaul Brook         for (i = 0; i < L2_SIZE; ++i) {
6725cd2c5b6SRichard Henderson             pd[i].first_tb = NULL;
6735cd2c5b6SRichard Henderson             invalidate_page_bitmap(pd + i);
6745cd2c5b6SRichard Henderson         }
6755cd2c5b6SRichard Henderson     } else {
6765cd2c5b6SRichard Henderson         void **pp = *lp;
6777296abacSPaul Brook         for (i = 0; i < L2_SIZE; ++i) {
6785cd2c5b6SRichard Henderson             page_flush_tb_1 (level - 1, pp + i);
6795cd2c5b6SRichard Henderson         }
6805cd2c5b6SRichard Henderson     }
6815cd2c5b6SRichard Henderson }
6825cd2c5b6SRichard Henderson 
683fd6ce8f6Sbellard static void page_flush_tb(void)
684fd6ce8f6Sbellard {
6855cd2c5b6SRichard Henderson     int i;
6865cd2c5b6SRichard Henderson     for (i = 0; i < V_L1_SIZE; i++) {
6875cd2c5b6SRichard Henderson         page_flush_tb_1(V_L1_SHIFT / L2_BITS - 1, l1_map + i);
688fd6ce8f6Sbellard     }
689fd6ce8f6Sbellard }
690fd6ce8f6Sbellard 
691fd6ce8f6Sbellard /* flush all the translation blocks */
692d4e8164fSbellard /* XXX: tb_flush is currently not thread safe */
void tb_flush(CPUState *env1)
{
    CPUState *env;
#if defined(DEBUG_FLUSH)
    printf("qemu: flush code_size=%ld nb_tbs=%d avg_tb_size=%ld\n",
           (unsigned long)(code_gen_ptr - code_gen_buffer),
           nb_tbs, nb_tbs > 0 ?
           ((unsigned long)(code_gen_ptr - code_gen_buffer)) / nb_tbs : 0);
#endif
    /* Sanity check before discarding anything: the generator must
       never have written past the end of the code buffer. */
    if ((unsigned long)(code_gen_ptr - code_gen_buffer) > code_gen_buffer_size)
        cpu_abort(env1, "Internal error: code buffer overflow\n");

    nb_tbs = 0;

    /* Every CPU's TB lookup cache now holds dangling pointers. */
    for(env = first_cpu; env != NULL; env = env->next_cpu) {
        memset (env->tb_jmp_cache, 0, TB_JMP_CACHE_SIZE * sizeof (void *));
    }

    /* Empty the physical-PC hash table and all per-page TB lists. */
    memset (tb_phys_hash, 0, CODE_GEN_PHYS_HASH_SIZE * sizeof (void *));
    page_flush_tb();

    /* All generated code is dead; restart at the buffer start. */
    code_gen_ptr = code_gen_buffer;
    /* XXX: flush processor icache at this point if cache flush is
       expensive */
    tb_flush_count++;
}
719fd6ce8f6Sbellard 
720fd6ce8f6Sbellard #ifdef DEBUG_TB_CHECK
721fd6ce8f6Sbellard 
722bc98a7efSj_mayer static void tb_invalidate_check(target_ulong address)
723fd6ce8f6Sbellard {
724fd6ce8f6Sbellard     TranslationBlock *tb;
725fd6ce8f6Sbellard     int i;
726fd6ce8f6Sbellard     address &= TARGET_PAGE_MASK;
72799773bd4Spbrook     for(i = 0;i < CODE_GEN_PHYS_HASH_SIZE; i++) {
72899773bd4Spbrook         for(tb = tb_phys_hash[i]; tb != NULL; tb = tb->phys_hash_next) {
729fd6ce8f6Sbellard             if (!(address + TARGET_PAGE_SIZE <= tb->pc ||
730fd6ce8f6Sbellard                   address >= tb->pc + tb->size)) {
7310bf9e31aSBlue Swirl                 printf("ERROR invalidate: address=" TARGET_FMT_lx
7320bf9e31aSBlue Swirl                        " PC=%08lx size=%04x\n",
73399773bd4Spbrook                        address, (long)tb->pc, tb->size);
734fd6ce8f6Sbellard             }
735fd6ce8f6Sbellard         }
736fd6ce8f6Sbellard     }
737fd6ce8f6Sbellard }
738fd6ce8f6Sbellard 
739fd6ce8f6Sbellard /* verify that all the pages have correct rights for code */
740fd6ce8f6Sbellard static void tb_page_check(void)
741fd6ce8f6Sbellard {
742fd6ce8f6Sbellard     TranslationBlock *tb;
743fd6ce8f6Sbellard     int i, flags1, flags2;
744fd6ce8f6Sbellard 
74599773bd4Spbrook     for(i = 0;i < CODE_GEN_PHYS_HASH_SIZE; i++) {
74699773bd4Spbrook         for(tb = tb_phys_hash[i]; tb != NULL; tb = tb->phys_hash_next) {
747fd6ce8f6Sbellard             flags1 = page_get_flags(tb->pc);
748fd6ce8f6Sbellard             flags2 = page_get_flags(tb->pc + tb->size - 1);
749fd6ce8f6Sbellard             if ((flags1 & PAGE_WRITE) || (flags2 & PAGE_WRITE)) {
750fd6ce8f6Sbellard                 printf("ERROR page flags: PC=%08lx size=%04x f1=%x f2=%x\n",
75199773bd4Spbrook                        (long)tb->pc, tb->size, flags1, flags2);
752fd6ce8f6Sbellard             }
753fd6ce8f6Sbellard         }
754fd6ce8f6Sbellard     }
755fd6ce8f6Sbellard }
756fd6ce8f6Sbellard 
757fd6ce8f6Sbellard #endif
758fd6ce8f6Sbellard 
759fd6ce8f6Sbellard /* invalidate one TB */
760fd6ce8f6Sbellard static inline void tb_remove(TranslationBlock **ptb, TranslationBlock *tb,
761fd6ce8f6Sbellard                              int next_offset)
762fd6ce8f6Sbellard {
763fd6ce8f6Sbellard     TranslationBlock *tb1;
764fd6ce8f6Sbellard     for(;;) {
765fd6ce8f6Sbellard         tb1 = *ptb;
766fd6ce8f6Sbellard         if (tb1 == tb) {
767fd6ce8f6Sbellard             *ptb = *(TranslationBlock **)((char *)tb1 + next_offset);
768fd6ce8f6Sbellard             break;
769fd6ce8f6Sbellard         }
770fd6ce8f6Sbellard         ptb = (TranslationBlock **)((char *)tb1 + next_offset);
771fd6ce8f6Sbellard     }
772fd6ce8f6Sbellard }
773fd6ce8f6Sbellard 
7749fa3e853Sbellard static inline void tb_page_remove(TranslationBlock **ptb, TranslationBlock *tb)
7759fa3e853Sbellard {
7769fa3e853Sbellard     TranslationBlock *tb1;
7779fa3e853Sbellard     unsigned int n1;
7789fa3e853Sbellard 
7799fa3e853Sbellard     for(;;) {
7809fa3e853Sbellard         tb1 = *ptb;
7819fa3e853Sbellard         n1 = (long)tb1 & 3;
7829fa3e853Sbellard         tb1 = (TranslationBlock *)((long)tb1 & ~3);
7839fa3e853Sbellard         if (tb1 == tb) {
7849fa3e853Sbellard             *ptb = tb1->page_next[n1];
7859fa3e853Sbellard             break;
7869fa3e853Sbellard         }
7879fa3e853Sbellard         ptb = &tb1->page_next[n1];
7889fa3e853Sbellard     }
7899fa3e853Sbellard }
7909fa3e853Sbellard 
/* Remove jump slot 'n' of 'tb' from the circular list of incoming
   jumps maintained at the destination TB.  Links are tagged pointers:
   the low 2 bits name the jmp_next slot the link lives in, with tag 2
   marking the jmp_first entry that closes the circle. */
static inline void tb_jmp_remove(TranslationBlock *tb, int n)
{
    TranslationBlock *tb1, **ptb;
    unsigned int n1;

    ptb = &tb->jmp_next[n];
    tb1 = *ptb;
    if (tb1) {
        /* find tb(n) in circular list */
        for(;;) {
            tb1 = *ptb;
            n1 = (long)tb1 & 3;
            tb1 = (TranslationBlock *)((long)tb1 & ~3);
            if (n1 == n && tb1 == tb)
                break;
            if (n1 == 2) {
                /* tag 2: follow the list head stored in jmp_first */
                ptb = &tb1->jmp_first;
            } else {
                ptb = &tb1->jmp_next[n1];
            }
        }
        /* now we can suppress tb(n) from the list */
        *ptb = tb->jmp_next[n];

        tb->jmp_next[n] = NULL;
    }
}
818d4e8164fSbellard 
819d4e8164fSbellard /* reset the jump entry 'n' of a TB so that it is not chained to
820d4e8164fSbellard    another TB */
static inline void tb_reset_jump(TranslationBlock *tb, int n)
{
    /* Re-aim jump 'n' at an address inside this TB's own generated
       code (tc_ptr + tb_next_offset[n]), undoing any chaining. */
    tb_set_jmp_target(tb, n, (unsigned long)(tb->tc_ptr + tb->tb_next_offset[n]));
}
825d4e8164fSbellard 
/* Remove 'tb' from every lookup structure and detach it from the jump
   chains.  'page_addr' names a page the caller is already processing
   (or -1): that page's TB list is not touched here. */
void tb_phys_invalidate(TranslationBlock *tb, tb_page_addr_t page_addr)
{
    CPUState *env;
    PageDesc *p;
    unsigned int h, n1;
    tb_page_addr_t phys_pc;
    TranslationBlock *tb1, *tb2;

    /* remove the TB from the hash list */
    phys_pc = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
    h = tb_phys_hash_func(phys_pc);
    tb_remove(&tb_phys_hash[h], tb,
              offsetof(TranslationBlock, phys_hash_next));

    /* remove the TB from the page list */
    if (tb->page_addr[0] != page_addr) {
        p = page_find(tb->page_addr[0] >> TARGET_PAGE_BITS);
        tb_page_remove(&p->first_tb, tb);
        invalidate_page_bitmap(p);
    }
    /* a TB may span two pages; handle the second page the same way */
    if (tb->page_addr[1] != -1 && tb->page_addr[1] != page_addr) {
        p = page_find(tb->page_addr[1] >> TARGET_PAGE_BITS);
        tb_page_remove(&p->first_tb, tb);
        invalidate_page_bitmap(p);
    }

    tb_invalidated_flag = 1;

    /* remove the TB from the hash list */
    h = tb_jmp_cache_hash_func(tb->pc);
    for(env = first_cpu; env != NULL; env = env->next_cpu) {
        if (env->tb_jmp_cache[h] == tb)
            env->tb_jmp_cache[h] = NULL;
    }

    /* suppress this TB from the two jump lists */
    tb_jmp_remove(tb, 0);
    tb_jmp_remove(tb, 1);

    /* suppress any remaining jumps to this TB: walk the incoming-jump
       list (tagged pointers, tag 2 ends the walk) and reset each
       jumping TB's jump back into its own code. */
    tb1 = tb->jmp_first;
    for(;;) {
        n1 = (long)tb1 & 3;
        if (n1 == 2)
            break;
        tb1 = (TranslationBlock *)((long)tb1 & ~3);
        tb2 = tb1->jmp_next[n1];
        tb_reset_jump(tb1, n1);
        tb1->jmp_next[n1] = NULL;
        tb1 = tb2;
    }
    tb->jmp_first = (TranslationBlock *)((long)tb | 2); /* fail safe */

    tb_phys_invalidate_count++;
}
8819fa3e853Sbellard 
/* Set bits [start, start+len) in the bit array 'tab'
   (bit i lives in tab[i >> 3], mask 1 << (i & 7)). */
static inline void set_bits(uint8_t *tab, int start, int len)
{
    int end = start + len;
    uint8_t *p = tab + (start >> 3);
    int head_mask = 0xff << (start & 7);

    if ((start & ~7) == (end & ~7)) {
        /* Range begins and ends within the same byte. */
        if (start < end) {
            *p |= head_mask & ~(0xff << (end & 7));
        }
        return;
    }
    /* Partial leading byte. */
    *p++ |= head_mask;
    start = (start + 8) & ~7;
    /* Whole bytes in the middle. */
    while (start < (end & ~7)) {
        *p++ = 0xff;
        start += 8;
    }
    /* Partial trailing byte, if any. */
    if (start < end) {
        *p |= ~(0xff << (end & 7));
    }
}
9089fa3e853Sbellard 
9099fa3e853Sbellard static void build_page_bitmap(PageDesc *p)
9109fa3e853Sbellard {
9119fa3e853Sbellard     int n, tb_start, tb_end;
9129fa3e853Sbellard     TranslationBlock *tb;
9139fa3e853Sbellard 
914b2a7081aSpbrook     p->code_bitmap = qemu_mallocz(TARGET_PAGE_SIZE / 8);
9159fa3e853Sbellard 
9169fa3e853Sbellard     tb = p->first_tb;
9179fa3e853Sbellard     while (tb != NULL) {
9189fa3e853Sbellard         n = (long)tb & 3;
9199fa3e853Sbellard         tb = (TranslationBlock *)((long)tb & ~3);
9209fa3e853Sbellard         /* NOTE: this is subtle as a TB may span two physical pages */
9219fa3e853Sbellard         if (n == 0) {
9229fa3e853Sbellard             /* NOTE: tb_end may be after the end of the page, but
9239fa3e853Sbellard                it is not a problem */
9249fa3e853Sbellard             tb_start = tb->pc & ~TARGET_PAGE_MASK;
9259fa3e853Sbellard             tb_end = tb_start + tb->size;
9269fa3e853Sbellard             if (tb_end > TARGET_PAGE_SIZE)
9279fa3e853Sbellard                 tb_end = TARGET_PAGE_SIZE;
9289fa3e853Sbellard         } else {
9299fa3e853Sbellard             tb_start = 0;
9309fa3e853Sbellard             tb_end = ((tb->pc + tb->size) & ~TARGET_PAGE_MASK);
9319fa3e853Sbellard         }
9329fa3e853Sbellard         set_bits(p->code_bitmap, tb_start, tb_end - tb_start);
9339fa3e853Sbellard         tb = tb->page_next[n];
9349fa3e853Sbellard     }
9359fa3e853Sbellard }
9369fa3e853Sbellard 
/* Translate one block starting at (pc, cs_base, flags) and link it
   into the physical page tables.  If TB allocation fails, the whole
   TB cache is flushed and allocation retried (which then cannot
   fail). */
TranslationBlock *tb_gen_code(CPUState *env,
                              target_ulong pc, target_ulong cs_base,
                              int flags, int cflags)
{
    TranslationBlock *tb;
    uint8_t *tc_ptr;
    tb_page_addr_t phys_pc, phys_page2;
    target_ulong virt_page2;
    int code_gen_size;

    phys_pc = get_page_addr_code(env, pc);
    tb = tb_alloc(pc);
    if (!tb) {
        /* flush must be done */
        tb_flush(env);
        /* cannot fail at this point */
        tb = tb_alloc(pc);
        /* Don't forget to invalidate previous TB info.  */
        tb_invalidated_flag = 1;
    }
    tc_ptr = code_gen_ptr;
    tb->tc_ptr = tc_ptr;
    tb->cs_base = cs_base;
    tb->flags = flags;
    tb->cflags = cflags;
    cpu_gen_code(env, tb, &code_gen_size);
    /* advance the shared code pointer past the generated code,
       rounded up to CODE_GEN_ALIGN */
    code_gen_ptr = (void *)(((unsigned long)code_gen_ptr + code_gen_size + CODE_GEN_ALIGN - 1) & ~(CODE_GEN_ALIGN - 1));

    /* check next page if needed */
    virt_page2 = (pc + tb->size - 1) & TARGET_PAGE_MASK;
    phys_page2 = -1;
    if ((pc & TARGET_PAGE_MASK) != virt_page2) {
        phys_page2 = get_page_addr_code(env, virt_page2);
    }
    tb_link_page(tb, phys_pc, phys_page2);
    return tb;
}
974d720b93dSbellard 
9759fa3e853Sbellard /* invalidate all TBs which intersect with the target physical page
9769fa3e853Sbellard    starting in range [start;end[. NOTE: start and end must refer to
977d720b93dSbellard    the same physical page. 'is_cpu_write_access' should be true if called
978d720b93dSbellard    from a real cpu write access: the virtual CPU will exit the current
979d720b93dSbellard    TB if code is modified inside this TB. */
void tb_invalidate_phys_page_range(tb_page_addr_t start, tb_page_addr_t end,
                                   int is_cpu_write_access)
{
    TranslationBlock *tb, *tb_next, *saved_tb;
    CPUState *env = cpu_single_env;
    tb_page_addr_t tb_start, tb_end;
    PageDesc *p;
    int n;
#ifdef TARGET_HAS_PRECISE_SMC
    int current_tb_not_found = is_cpu_write_access;
    TranslationBlock *current_tb = NULL;
    int current_tb_modified = 0;
    target_ulong current_pc = 0;
    target_ulong current_cs_base = 0;
    int current_flags = 0;
#endif /* TARGET_HAS_PRECISE_SMC */

    p = page_find(start >> TARGET_PAGE_BITS);
    if (!p)
        return;
    /* After enough write-invalidations of this page, build the code
       bitmap so future small writes can be filtered cheaply. */
    if (!p->code_bitmap &&
        ++p->code_write_count >= SMC_BITMAP_USE_THRESHOLD &&
        is_cpu_write_access) {
        /* build code bitmap */
        build_page_bitmap(p);
    }

    /* we remove all the TBs in the range [start, end[ */
    /* XXX: see if in some cases it could be faster to invalidate all the code */
    tb = p->first_tb;
    while (tb != NULL) {
        n = (long)tb & 3;   /* low bits tag which page slot this link is */
        tb = (TranslationBlock *)((long)tb & ~3);
        tb_next = tb->page_next[n];
        /* NOTE: this is subtle as a TB may span two physical pages */
        if (n == 0) {
            /* NOTE: tb_end may be after the end of the page, but
               it is not a problem */
            tb_start = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
            tb_end = tb_start + tb->size;
        } else {
            tb_start = tb->page_addr[1];
            tb_end = tb_start + ((tb->pc + tb->size) & ~TARGET_PAGE_MASK);
        }
        /* interval-overlap test against [start, end[ */
        if (!(tb_end <= start || tb_start >= end)) {
#ifdef TARGET_HAS_PRECISE_SMC
            if (current_tb_not_found) {
                current_tb_not_found = 0;
                current_tb = NULL;
                if (env->mem_io_pc) {
                    /* now we have a real cpu fault */
                    current_tb = tb_find_pc(env->mem_io_pc);
                }
            }
            if (current_tb == tb &&
                (current_tb->cflags & CF_COUNT_MASK) != 1) {
                /* If we are modifying the current TB, we must stop
                its execution. We could be more precise by checking
                that the modification is after the current PC, but it
                would require a specialized function to partially
                restore the CPU state */

                current_tb_modified = 1;
                cpu_restore_state(current_tb, env,
                                  env->mem_io_pc, NULL);
                cpu_get_tb_cpu_state(env, &current_pc, &current_cs_base,
                                     &current_flags);
            }
#endif /* TARGET_HAS_PRECISE_SMC */
            /* we need to do that to handle the case where a signal
               occurs while doing tb_phys_invalidate() */
            saved_tb = NULL;
            if (env) {
                saved_tb = env->current_tb;
                env->current_tb = NULL;
            }
            tb_phys_invalidate(tb, -1);
            if (env) {
                env->current_tb = saved_tb;
                /* re-deliver any interrupt that arrived while
                   current_tb was cleared */
                if (env->interrupt_request && env->current_tb)
                    cpu_interrupt(env, env->interrupt_request);
            }
        }
        tb = tb_next;
    }
#if !defined(CONFIG_USER_ONLY)
    /* if no code remaining, no need to continue to use slow writes */
    if (!p->first_tb) {
        invalidate_page_bitmap(p);
        if (is_cpu_write_access) {
            tlb_unprotect_code_phys(env, start, env->mem_io_vaddr);
        }
    }
#endif
#ifdef TARGET_HAS_PRECISE_SMC
    if (current_tb_modified) {
        /* we generate a block containing just the instruction
           modifying the memory. It will ensure that it cannot modify
           itself */
        env->current_tb = NULL;
        tb_gen_code(env, current_pc, current_cs_base, current_flags, 1);
        cpu_resume_from_signal(env, NULL);
    }
#endif
}
10859fa3e853Sbellard 
10869fa3e853Sbellard /* len must be <= 8 and start must be a multiple of len */
108741c1b1c9SPaul Brook static inline void tb_invalidate_phys_page_fast(tb_page_addr_t start, int len)
10889fa3e853Sbellard {
10899fa3e853Sbellard     PageDesc *p;
10909fa3e853Sbellard     int offset, b;
109159817ccbSbellard #if 0
1092a4193c8aSbellard     if (1) {
109393fcfe39Saliguori         qemu_log("modifying code at 0x%x size=%d EIP=%x PC=%08x\n",
10942e70f6efSpbrook                   cpu_single_env->mem_io_vaddr, len,
1095a4193c8aSbellard                   cpu_single_env->eip,
1096a4193c8aSbellard                   cpu_single_env->eip + (long)cpu_single_env->segs[R_CS].base);
1097a4193c8aSbellard     }
109859817ccbSbellard #endif
10999fa3e853Sbellard     p = page_find(start >> TARGET_PAGE_BITS);
11009fa3e853Sbellard     if (!p)
11019fa3e853Sbellard         return;
11029fa3e853Sbellard     if (p->code_bitmap) {
11039fa3e853Sbellard         offset = start & ~TARGET_PAGE_MASK;
11049fa3e853Sbellard         b = p->code_bitmap[offset >> 3] >> (offset & 7);
11059fa3e853Sbellard         if (b & ((1 << len) - 1))
11069fa3e853Sbellard             goto do_invalidate;
11079fa3e853Sbellard     } else {
11089fa3e853Sbellard     do_invalidate:
1109d720b93dSbellard         tb_invalidate_phys_page_range(start, start + len, 1);
11109fa3e853Sbellard     }
11119fa3e853Sbellard }
11129fa3e853Sbellard 
11139fa3e853Sbellard #if !defined(CONFIG_SOFTMMU)
/* Invalidate every TB on the page containing 'addr' and clear the
   page's TB list.  'pc'/'puc' describe the faulting write (pc == 0
   when unknown) so precise-SMC targets can restore CPU state and
   resume. */
static void tb_invalidate_phys_page(tb_page_addr_t addr,
                                    unsigned long pc, void *puc)
{
    TranslationBlock *tb;
    PageDesc *p;
    int n;
#ifdef TARGET_HAS_PRECISE_SMC
    TranslationBlock *current_tb = NULL;
    CPUState *env = cpu_single_env;
    int current_tb_modified = 0;
    target_ulong current_pc = 0;
    target_ulong current_cs_base = 0;
    int current_flags = 0;
#endif

    addr &= TARGET_PAGE_MASK;
    p = page_find(addr >> TARGET_PAGE_BITS);
    if (!p)
        return;
    tb = p->first_tb;
#ifdef TARGET_HAS_PRECISE_SMC
    if (tb && pc != 0) {
        current_tb = tb_find_pc(pc);
    }
#endif
    while (tb != NULL) {
        n = (long)tb & 3;   /* low bits tag which page slot this link is */
        tb = (TranslationBlock *)((long)tb & ~3);
#ifdef TARGET_HAS_PRECISE_SMC
        if (current_tb == tb &&
            (current_tb->cflags & CF_COUNT_MASK) != 1) {
                /* If we are modifying the current TB, we must stop
                   its execution. We could be more precise by checking
                   that the modification is after the current PC, but it
                   would require a specialized function to partially
                   restore the CPU state */

            current_tb_modified = 1;
            cpu_restore_state(current_tb, env, pc, puc);
            cpu_get_tb_cpu_state(env, &current_pc, &current_cs_base,
                                 &current_flags);
        }
#endif /* TARGET_HAS_PRECISE_SMC */
        tb_phys_invalidate(tb, addr);
        tb = tb->page_next[n];
    }
    p->first_tb = NULL;
#ifdef TARGET_HAS_PRECISE_SMC
    if (current_tb_modified) {
        /* we generate a block containing just the instruction
           modifying the memory. It will ensure that it cannot modify
           itself */
        env->current_tb = NULL;
        tb_gen_code(env, current_pc, current_cs_base, current_flags, 1);
        cpu_resume_from_signal(env, puc);
    }
#endif
}
11729fa3e853Sbellard #endif
1173fd6ce8f6Sbellard 
1174fd6ce8f6Sbellard /* add the tb in the target page and protect it if necessary */
/* Link 'tb' into the page list of its page slot 'n' (0 or 1) and
   write-protect the page so self-modifying code is detected.  The
   list pointer is tagged with 'n' in its low bits. */
static inline void tb_alloc_page(TranslationBlock *tb,
                                 unsigned int n, tb_page_addr_t page_addr)
{
    PageDesc *p;
    TranslationBlock *last_first_tb;

    tb->page_addr[n] = page_addr;
    p = page_find_alloc(page_addr >> TARGET_PAGE_BITS, 1);
    tb->page_next[n] = p->first_tb;
    /* remember whether the page already held code (see below) */
    last_first_tb = p->first_tb;
    p->first_tb = (TranslationBlock *)((long)tb | n);
    invalidate_page_bitmap(p);

#if defined(TARGET_HAS_SMC) || 1

#if defined(CONFIG_USER_ONLY)
    if (p->flags & PAGE_WRITE) {
        target_ulong addr;
        PageDesc *p2;
        int prot;

        /* force the host page as non writable (writes will have a
           page fault + mprotect overhead) */
        page_addr &= qemu_host_page_mask;
        prot = 0;
        /* a host page may cover several target pages; gather the
           combined flags and strip PAGE_WRITE from each */
        for(addr = page_addr; addr < page_addr + qemu_host_page_size;
            addr += TARGET_PAGE_SIZE) {

            p2 = page_find (addr >> TARGET_PAGE_BITS);
            if (!p2)
                continue;
            prot |= p2->flags;
            p2->flags &= ~PAGE_WRITE;
          }
        mprotect(g2h(page_addr), qemu_host_page_size,
                 (prot & PAGE_BITS) & ~PAGE_WRITE);
#ifdef DEBUG_TB_INVALIDATE
        printf("protecting code page: 0x" TARGET_FMT_lx "\n",
               page_addr);
#endif
    }
#else
    /* if some code is already present, then the pages are already
       protected. So we handle the case where only the first TB is
       allocated in a physical page */
    if (!last_first_tb) {
        tlb_protect_code(page_addr);
    }
#endif

#endif /* TARGET_HAS_SMC */
}
1227fd6ce8f6Sbellard 
1228fd6ce8f6Sbellard /* Allocate a new translation block. Flush the translation buffer if
1229fd6ce8f6Sbellard    too many translation blocks or too much generated code. */
1230c27004ecSbellard TranslationBlock *tb_alloc(target_ulong pc)
1231fd6ce8f6Sbellard {
1232fd6ce8f6Sbellard     TranslationBlock *tb;
1233fd6ce8f6Sbellard 
123426a5f13bSbellard     if (nb_tbs >= code_gen_max_blocks ||
123526a5f13bSbellard         (code_gen_ptr - code_gen_buffer) >= code_gen_buffer_max_size)
1236d4e8164fSbellard         return NULL;
1237fd6ce8f6Sbellard     tb = &tbs[nb_tbs++];
1238fd6ce8f6Sbellard     tb->pc = pc;
1239b448f2f3Sbellard     tb->cflags = 0;
1240d4e8164fSbellard     return tb;
1241d4e8164fSbellard }
1242d4e8164fSbellard 
12432e70f6efSpbrook void tb_free(TranslationBlock *tb)
12442e70f6efSpbrook {
1245bf20dc07Sths     /* In practice this is mostly used for single use temporary TB
12462e70f6efSpbrook        Ignore the hard cases and just back up if this TB happens to
12472e70f6efSpbrook        be the last one generated.  */
12482e70f6efSpbrook     if (nb_tbs > 0 && tb == &tbs[nb_tbs - 1]) {
12492e70f6efSpbrook         code_gen_ptr = tb->tc_ptr;
12502e70f6efSpbrook         nb_tbs--;
12512e70f6efSpbrook     }
12522e70f6efSpbrook }
12532e70f6efSpbrook 
/* add a new TB and link it to the physical page tables. phys_page2 is
   (-1) to indicate that only one page contains the TB.
   @tb: freshly generated block (from tb_gen_code)
   @phys_pc: physical address of the first guest instruction
   @phys_page2: physical address of the second page, or -1 */
void tb_link_page(TranslationBlock *tb,
                  tb_page_addr_t phys_pc, tb_page_addr_t phys_page2)
{
    unsigned int h;
    TranslationBlock **ptb;

    /* Grab the mmap lock to stop another thread invalidating this TB
       before we are done.  */
    mmap_lock();
    /* add in the physical hash table */
    h = tb_phys_hash_func(phys_pc);
    ptb = &tb_phys_hash[h];
    /* push at the head of the hash bucket's singly linked list */
    tb->phys_hash_next = *ptb;
    *ptb = tb;

    /* add in the page list */
    tb_alloc_page(tb, 0, phys_pc & TARGET_PAGE_MASK);
    if (phys_page2 != -1)
        tb_alloc_page(tb, 1, phys_page2);
    else
        tb->page_addr[1] = -1;

    /* jmp_first is a tagged pointer: the low bits hold the jump-slot
       number, value 2 marking the list head.  Pointing it at ourselves
       with tag 2 makes the incoming-jump list initially empty. */
    tb->jmp_first = (TranslationBlock *)((long)tb | 2);
    tb->jmp_next[0] = NULL;
    tb->jmp_next[1] = NULL;

    /* init original jump addresses */
    if (tb->tb_next_offset[0] != 0xffff)
        tb_reset_jump(tb, 0);
    if (tb->tb_next_offset[1] != 0xffff)
        tb_reset_jump(tb, 1);

#ifdef DEBUG_TB_CHECK
    tb_page_check();
#endif
    mmap_unlock();
}
1293fd6ce8f6Sbellard 
1294a513fe19Sbellard /* find the TB 'tb' such that tb[0].tc_ptr <= tc_ptr <
1295a513fe19Sbellard    tb[1].tc_ptr. Return NULL if not found */
1296a513fe19Sbellard TranslationBlock *tb_find_pc(unsigned long tc_ptr)
1297a513fe19Sbellard {
1298a513fe19Sbellard     int m_min, m_max, m;
1299a513fe19Sbellard     unsigned long v;
1300a513fe19Sbellard     TranslationBlock *tb;
1301a513fe19Sbellard 
1302a513fe19Sbellard     if (nb_tbs <= 0)
1303a513fe19Sbellard         return NULL;
1304a513fe19Sbellard     if (tc_ptr < (unsigned long)code_gen_buffer ||
1305a513fe19Sbellard         tc_ptr >= (unsigned long)code_gen_ptr)
1306a513fe19Sbellard         return NULL;
1307a513fe19Sbellard     /* binary search (cf Knuth) */
1308a513fe19Sbellard     m_min = 0;
1309a513fe19Sbellard     m_max = nb_tbs - 1;
1310a513fe19Sbellard     while (m_min <= m_max) {
1311a513fe19Sbellard         m = (m_min + m_max) >> 1;
1312a513fe19Sbellard         tb = &tbs[m];
1313a513fe19Sbellard         v = (unsigned long)tb->tc_ptr;
1314a513fe19Sbellard         if (v == tc_ptr)
1315a513fe19Sbellard             return tb;
1316a513fe19Sbellard         else if (tc_ptr < v) {
1317a513fe19Sbellard             m_max = m - 1;
1318a513fe19Sbellard         } else {
1319a513fe19Sbellard             m_min = m + 1;
1320a513fe19Sbellard         }
1321a513fe19Sbellard     }
1322a513fe19Sbellard     return &tbs[m_max];
1323a513fe19Sbellard }
13247501267eSbellard 
1325ea041c0eSbellard static void tb_reset_jump_recursive(TranslationBlock *tb);
1326ea041c0eSbellard 
/* Unchain outgoing jump @n of @tb: find the target TB by walking the
   tagged jump chain, unlink @tb from the target's incoming-jump
   (jmp_first) list, patch the generated code back to its default exit,
   then recurse into the target. */
static inline void tb_reset_jump_recursive2(TranslationBlock *tb, int n)
{
    TranslationBlock *tb1, *tb_next, **ptb;
    unsigned int n1;

    tb1 = tb->jmp_next[n];
    if (tb1 != NULL) {
        /* find head of list */
        for(;;) {
            /* low 2 bits of the pointer tag the jump slot; tag 2 marks
               the list head, i.e. the jump target itself */
            n1 = (long)tb1 & 3;
            tb1 = (TranslationBlock *)((long)tb1 & ~3);
            if (n1 == 2)
                break;
            tb1 = tb1->jmp_next[n1];
        }
        /* we are now sure now that tb jumps to tb1 */
        tb_next = tb1;

        /* remove tb from the jmp_first list */
        ptb = &tb_next->jmp_first;
        for(;;) {
            tb1 = *ptb;
            n1 = (long)tb1 & 3;
            tb1 = (TranslationBlock *)((long)tb1 & ~3);
            if (n1 == n && tb1 == tb)
                break;
            ptb = &tb1->jmp_next[n1];
        }
        *ptb = tb->jmp_next[n];
        tb->jmp_next[n] = NULL;

        /* suppress the jump to next tb in generated code */
        tb_reset_jump(tb, n);

        /* suppress jumps in the tb on which we could have jumped */
        tb_reset_jump_recursive(tb_next);
    }
}
1365ea041c0eSbellard 
1366ea041c0eSbellard static void tb_reset_jump_recursive(TranslationBlock *tb)
1367ea041c0eSbellard {
1368ea041c0eSbellard     tb_reset_jump_recursive2(tb, 0);
1369ea041c0eSbellard     tb_reset_jump_recursive2(tb, 1);
1370ea041c0eSbellard }
1371ea041c0eSbellard 
13721fddef4bSbellard #if defined(TARGET_HAS_ICE)
137394df27fdSPaul Brook #if defined(CONFIG_USER_ONLY)
/* Drop translated code overlapping a breakpoint address so it gets
   retranslated with the breakpoint trap included.  In the user-mode
   build virtual and physical addresses coincide, so pc is used
   directly. */
static void breakpoint_invalidate(CPUState *env, target_ulong pc)
{
    tb_invalidate_phys_page_range(pc, pc + 1, 0);
}
137894df27fdSPaul Brook #else
1379d720b93dSbellard static void breakpoint_invalidate(CPUState *env, target_ulong pc)
1380d720b93dSbellard {
1381c227f099SAnthony Liguori     target_phys_addr_t addr;
13829b3c35e0Sj_mayer     target_ulong pd;
1383c227f099SAnthony Liguori     ram_addr_t ram_addr;
1384c2f07f81Spbrook     PhysPageDesc *p;
1385d720b93dSbellard 
1386c2f07f81Spbrook     addr = cpu_get_phys_page_debug(env, pc);
1387c2f07f81Spbrook     p = phys_page_find(addr >> TARGET_PAGE_BITS);
1388c2f07f81Spbrook     if (!p) {
1389c2f07f81Spbrook         pd = IO_MEM_UNASSIGNED;
1390c2f07f81Spbrook     } else {
1391c2f07f81Spbrook         pd = p->phys_offset;
1392c2f07f81Spbrook     }
1393c2f07f81Spbrook     ram_addr = (pd & TARGET_PAGE_MASK) | (pc & ~TARGET_PAGE_MASK);
1394706cd4b5Spbrook     tb_invalidate_phys_page_range(ram_addr, ram_addr + 1, 0);
1395d720b93dSbellard }
1396c27004ecSbellard #endif
139794df27fdSPaul Brook #endif /* TARGET_HAS_ICE */
1398d720b93dSbellard 
1399c527ee8fSPaul Brook #if defined(CONFIG_USER_ONLY)
1400c527ee8fSPaul Brook void cpu_watchpoint_remove_all(CPUState *env, int mask)
1401c527ee8fSPaul Brook 
1402c527ee8fSPaul Brook {
1403c527ee8fSPaul Brook }
1404c527ee8fSPaul Brook 
/* Watchpoints are unsupported in the user-mode build; always fail. */
int cpu_watchpoint_insert(CPUState *env, target_ulong addr, target_ulong len,
                          int flags, CPUWatchpoint **watchpoint)
{
    return -ENOSYS;
}
1410c527ee8fSPaul Brook #else
/* Add a watchpoint.  @len must be 1, 2, 4 or 8 and @addr aligned to it.
   On success returns 0 and, when @watchpoint is non-NULL, stores the
   new entry through it; returns -EINVAL for a bad length/alignment. */
int cpu_watchpoint_insert(CPUState *env, target_ulong addr, target_ulong len,
                          int flags, CPUWatchpoint **watchpoint)
{
    target_ulong len_mask = ~(len - 1);
    CPUWatchpoint *wp;

    /* sanity checks: allow power-of-2 lengths, deny unaligned watchpoints */
    if ((len != 1 && len != 2 && len != 4 && len != 8) || (addr & ~len_mask)) {
        fprintf(stderr, "qemu: tried to set invalid watchpoint at "
                TARGET_FMT_lx ", len=" TARGET_FMT_lu "\n", addr, len);
        return -EINVAL;
    }
    wp = qemu_malloc(sizeof(*wp));

    wp->vaddr = addr;
    wp->len_mask = len_mask;
    wp->flags = flags;

    /* keep all GDB-injected watchpoints in front */
    if (flags & BP_GDB)
        QTAILQ_INSERT_HEAD(&env->watchpoints, wp, entry);
    else
        QTAILQ_INSERT_TAIL(&env->watchpoints, wp, entry);

    /* drop the cached TLB entry so the next access re-checks watchpoints */
    tlb_flush_page(env, addr);

    if (watchpoint)
        *watchpoint = wp;
    return 0;
}
14426658ffb8Spbrook 
1443a1d1bb31Saliguori /* Remove a specific watchpoint.  */
1444a1d1bb31Saliguori int cpu_watchpoint_remove(CPUState *env, target_ulong addr, target_ulong len,
1445a1d1bb31Saliguori                           int flags)
14466658ffb8Spbrook {
1447b4051334Saliguori     target_ulong len_mask = ~(len - 1);
1448a1d1bb31Saliguori     CPUWatchpoint *wp;
14496658ffb8Spbrook 
145072cf2d4fSBlue Swirl     QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
1451b4051334Saliguori         if (addr == wp->vaddr && len_mask == wp->len_mask
14526e140f28Saliguori                 && flags == (wp->flags & ~BP_WATCHPOINT_HIT)) {
1453a1d1bb31Saliguori             cpu_watchpoint_remove_by_ref(env, wp);
14546658ffb8Spbrook             return 0;
14556658ffb8Spbrook         }
14566658ffb8Spbrook     }
1457a1d1bb31Saliguori     return -ENOENT;
14586658ffb8Spbrook }
14596658ffb8Spbrook 
1460a1d1bb31Saliguori /* Remove a specific watchpoint by reference.  */
1461a1d1bb31Saliguori void cpu_watchpoint_remove_by_ref(CPUState *env, CPUWatchpoint *watchpoint)
1462a1d1bb31Saliguori {
146372cf2d4fSBlue Swirl     QTAILQ_REMOVE(&env->watchpoints, watchpoint, entry);
14647d03f82fSedgar_igl 
1465a1d1bb31Saliguori     tlb_flush_page(env, watchpoint->vaddr);
1466a1d1bb31Saliguori 
1467a1d1bb31Saliguori     qemu_free(watchpoint);
14687d03f82fSedgar_igl }
14697d03f82fSedgar_igl 
1470a1d1bb31Saliguori /* Remove all matching watchpoints.  */
1471a1d1bb31Saliguori void cpu_watchpoint_remove_all(CPUState *env, int mask)
1472a1d1bb31Saliguori {
1473c0ce998eSaliguori     CPUWatchpoint *wp, *next;
1474a1d1bb31Saliguori 
147572cf2d4fSBlue Swirl     QTAILQ_FOREACH_SAFE(wp, &env->watchpoints, entry, next) {
1476a1d1bb31Saliguori         if (wp->flags & mask)
1477a1d1bb31Saliguori             cpu_watchpoint_remove_by_ref(env, wp);
1478a1d1bb31Saliguori     }
1479c0ce998eSaliguori }
1480c527ee8fSPaul Brook #endif
1481a1d1bb31Saliguori 
/* Add a breakpoint at @pc.  On success returns 0 and, when @breakpoint
   is non-NULL, stores the new entry through it; returns -ENOSYS when
   the target lacks breakpoint (ICE) support. */
int cpu_breakpoint_insert(CPUState *env, target_ulong pc, int flags,
                          CPUBreakpoint **breakpoint)
{
#if defined(TARGET_HAS_ICE)
    CPUBreakpoint *bp;

    bp = qemu_malloc(sizeof(*bp));

    bp->pc = pc;
    bp->flags = flags;

    /* keep all GDB-injected breakpoints in front */
    if (flags & BP_GDB)
        QTAILQ_INSERT_HEAD(&env->breakpoints, bp, entry);
    else
        QTAILQ_INSERT_TAIL(&env->breakpoints, bp, entry);

    /* retranslate the code at pc so the breakpoint trap is emitted */
    breakpoint_invalidate(env, pc);

    if (breakpoint)
        *breakpoint = bp;
    return 0;
#else
    return -ENOSYS;
#endif
}
15094c3a88a2Sbellard 
1510a1d1bb31Saliguori /* Remove a specific breakpoint.  */
1511a1d1bb31Saliguori int cpu_breakpoint_remove(CPUState *env, target_ulong pc, int flags)
1512a1d1bb31Saliguori {
15137d03f82fSedgar_igl #if defined(TARGET_HAS_ICE)
1514a1d1bb31Saliguori     CPUBreakpoint *bp;
1515a1d1bb31Saliguori 
151672cf2d4fSBlue Swirl     QTAILQ_FOREACH(bp, &env->breakpoints, entry) {
1517a1d1bb31Saliguori         if (bp->pc == pc && bp->flags == flags) {
1518a1d1bb31Saliguori             cpu_breakpoint_remove_by_ref(env, bp);
1519a1d1bb31Saliguori             return 0;
15207d03f82fSedgar_igl         }
1521a1d1bb31Saliguori     }
1522a1d1bb31Saliguori     return -ENOENT;
1523a1d1bb31Saliguori #else
1524a1d1bb31Saliguori     return -ENOSYS;
15257d03f82fSedgar_igl #endif
15267d03f82fSedgar_igl }
15277d03f82fSedgar_igl 
1528a1d1bb31Saliguori /* Remove a specific breakpoint by reference.  */
1529a1d1bb31Saliguori void cpu_breakpoint_remove_by_ref(CPUState *env, CPUBreakpoint *breakpoint)
15304c3a88a2Sbellard {
15311fddef4bSbellard #if defined(TARGET_HAS_ICE)
153272cf2d4fSBlue Swirl     QTAILQ_REMOVE(&env->breakpoints, breakpoint, entry);
1533d720b93dSbellard 
1534a1d1bb31Saliguori     breakpoint_invalidate(env, breakpoint->pc);
1535a1d1bb31Saliguori 
1536a1d1bb31Saliguori     qemu_free(breakpoint);
1537a1d1bb31Saliguori #endif
1538a1d1bb31Saliguori }
1539a1d1bb31Saliguori 
1540a1d1bb31Saliguori /* Remove all matching breakpoints. */
1541a1d1bb31Saliguori void cpu_breakpoint_remove_all(CPUState *env, int mask)
1542a1d1bb31Saliguori {
1543a1d1bb31Saliguori #if defined(TARGET_HAS_ICE)
1544c0ce998eSaliguori     CPUBreakpoint *bp, *next;
1545a1d1bb31Saliguori 
154672cf2d4fSBlue Swirl     QTAILQ_FOREACH_SAFE(bp, &env->breakpoints, entry, next) {
1547a1d1bb31Saliguori         if (bp->flags & mask)
1548a1d1bb31Saliguori             cpu_breakpoint_remove_by_ref(env, bp);
1549c0ce998eSaliguori     }
15504c3a88a2Sbellard #endif
15514c3a88a2Sbellard }
15524c3a88a2Sbellard 
/* enable or disable single step mode. EXCP_DEBUG is returned by the
   CPU loop after each instruction */
void cpu_single_step(CPUState *env, int enabled)
{
#if defined(TARGET_HAS_ICE)
    if (env->singlestep_enabled != enabled) {
        env->singlestep_enabled = enabled;
        if (kvm_enabled())
            /* under KVM, single-stepping is armed in the kernel */
            kvm_update_guest_debug(env, 0);
        else {
            /* must flush all the translated code to avoid inconsistencies */
            /* XXX: only flush what is necessary */
            tb_flush(env);
        }
    }
#endif
}
1570c33a346eSbellard 
/* enable or disable low levels log.  A non-zero @log_flags lazily opens
   the log file on first use; zero closes it. */
void cpu_set_log(int log_flags)
{
    loglevel = log_flags;
    if (loglevel && !logfile) {
        /* first open truncates unless a previous open set log_append */
        logfile = fopen(logfilename, log_append ? "a" : "w");
        if (!logfile) {
            perror(logfilename);
            _exit(1);
        }
#if !defined(CONFIG_SOFTMMU)
        /* must avoid mmap() usage of glibc by setting a buffer "by hand" */
        {
            static char logfile_buf[4096];
            setvbuf(logfile, logfile_buf, _IOLBF, sizeof(logfile_buf));
        }
#elif !defined(_WIN32)
        /* Win32 doesn't support line-buffering and requires size >= 2 */
        setvbuf(logfile, NULL, _IOLBF, 0);
#endif
        /* subsequent reopens append instead of truncating */
        log_append = 1;
    }
    if (!loglevel && logfile) {
        fclose(logfile);
        logfile = NULL;
    }
}
159834865134Sbellard 
/* Set the file used for CPU logging and reopen it at the current
   loglevel. */
void cpu_set_log_filename(const char *filename)
{
    /* NOTE(review): the previous logfilename is not freed; its initial
       value may be a string literal, so freeing blindly would be unsafe.
       The one-time leak looks accepted -- confirm before changing. */
    logfilename = strdup(filename);
    if (logfile) {
        fclose(logfile);
        logfile = NULL;
    }
    cpu_set_log(loglevel);
}
1608c33a346eSbellard 
/* Unchain the TB the CPU is currently executing (if any) so control
   returns to the CPU loop promptly, e.g. to service an interrupt or an
   exit request. */
static void cpu_unlink_tb(CPUState *env)
{
    /* FIXME: TB unchaining isn't SMP safe.  For now just ignore the
       problem and hope the cpu will stop of its own accord.  For userspace
       emulation this often isn't actually as bad as it sounds.  Often
       signals are used primarily to interrupt blocking syscalls.  */
    TranslationBlock *tb;
    static spinlock_t interrupt_lock = SPIN_LOCK_UNLOCKED;

    /* serialize unchaining against concurrent callers */
    spin_lock(&interrupt_lock);
    tb = env->current_tb;
    /* if the cpu is currently executing code, we must unlink it and
       all the potentially executing TB */
    if (tb) {
        env->current_tb = NULL;
        tb_reset_jump_recursive(tb);
    }
    spin_unlock(&interrupt_lock);
}
16283098dba0Saurel32 
/* mask must never be zero, except for A20 change call.
   Raises the given interrupt bits on @env and makes sure the CPU loop
   notices them soon. */
void cpu_interrupt(CPUState *env, int mask)
{
    int old_mask;

    old_mask = env->interrupt_request;
    env->interrupt_request |= mask;

#ifndef CONFIG_USER_ONLY
    /*
     * If called from iothread context, wake the target cpu in
     * case its halted.
     */
    if (!qemu_cpu_self(env)) {
        qemu_cpu_kick(env);
        return;
    }
#endif

    if (use_icount) {
        /* force the instruction-count decrementer to expire so the
           current TB stops at the next check */
        env->icount_decr.u16.high = 0xffff;
#ifndef CONFIG_USER_ONLY
        /* a newly raised interrupt outside an I/O instruction would make
           icount non-deterministic -- treat it as a fatal bug */
        if (!can_do_io(env)
            && (mask & ~old_mask) != 0) {
            cpu_abort(env, "Raised interrupt while not in I/O function");
        }
#endif
    } else {
        cpu_unlink_tb(env);
    }
}
1660ea041c0eSbellard 
1661b54ad049Sbellard void cpu_reset_interrupt(CPUState *env, int mask)
1662b54ad049Sbellard {
1663b54ad049Sbellard     env->interrupt_request &= ~mask;
1664b54ad049Sbellard }
1665b54ad049Sbellard 
/* Request that the CPU loop exits: set the flag first, then unchain the
   running TB so the flag is noticed promptly. */
void cpu_exit(CPUState *env)
{
    env->exit_request = 1;
    cpu_unlink_tb(env);
}
16713098dba0Saurel32 
/* Table mapping "-d" log category names to their mask bits, terminated
   by a zero-mask sentinel (consumed by cpu_str_to_log_mask()). */
const CPULogItem cpu_log_items[] = {
    { CPU_LOG_TB_OUT_ASM, "out_asm",
      "show generated host assembly code for each compiled TB" },
    { CPU_LOG_TB_IN_ASM, "in_asm",
      "show target assembly code for each compiled TB" },
    { CPU_LOG_TB_OP, "op",
      "show micro ops for each compiled TB" },
    { CPU_LOG_TB_OP_OPT, "op_opt",
      "show micro ops "
#ifdef TARGET_I386
      "before eflags optimization and "
#endif
      "after liveness analysis" },
    { CPU_LOG_INT, "int",
      "show interrupts/exceptions in short format" },
    { CPU_LOG_EXEC, "exec",
      "show trace before each executed TB (lots of logs)" },
    { CPU_LOG_TB_CPU, "cpu",
      "show CPU state before block translation" },
#ifdef TARGET_I386
    { CPU_LOG_PCALL, "pcall",
      "show protected mode far calls/returns/exceptions" },
    { CPU_LOG_RESET, "cpu_reset",
      "show CPU state before CPU resets" },
#endif
#ifdef DEBUG_IOPORT
    { CPU_LOG_IOPORT, "ioport",
      "show all i/o ports accesses" },
#endif
    { 0, NULL, NULL },
};
1703f193c797Sbellard 
1704f6f3fbcaSMichael S. Tsirkin #ifndef CONFIG_USER_ONLY
/* List of registered physical-memory clients, notified on mapping and
   dirty-log changes. */
static QLIST_HEAD(memory_client_list, CPUPhysMemoryClient) memory_client_list
    = QLIST_HEAD_INITIALIZER(memory_client_list);
1707f6f3fbcaSMichael S. Tsirkin 
1708f6f3fbcaSMichael S. Tsirkin static void cpu_notify_set_memory(target_phys_addr_t start_addr,
1709f6f3fbcaSMichael S. Tsirkin                                   ram_addr_t size,
1710f6f3fbcaSMichael S. Tsirkin                                   ram_addr_t phys_offset)
1711f6f3fbcaSMichael S. Tsirkin {
1712f6f3fbcaSMichael S. Tsirkin     CPUPhysMemoryClient *client;
1713f6f3fbcaSMichael S. Tsirkin     QLIST_FOREACH(client, &memory_client_list, list) {
1714f6f3fbcaSMichael S. Tsirkin         client->set_memory(client, start_addr, size, phys_offset);
1715f6f3fbcaSMichael S. Tsirkin     }
1716f6f3fbcaSMichael S. Tsirkin }
1717f6f3fbcaSMichael S. Tsirkin 
1718f6f3fbcaSMichael S. Tsirkin static int cpu_notify_sync_dirty_bitmap(target_phys_addr_t start,
1719f6f3fbcaSMichael S. Tsirkin                                         target_phys_addr_t end)
1720f6f3fbcaSMichael S. Tsirkin {
1721f6f3fbcaSMichael S. Tsirkin     CPUPhysMemoryClient *client;
1722f6f3fbcaSMichael S. Tsirkin     QLIST_FOREACH(client, &memory_client_list, list) {
1723f6f3fbcaSMichael S. Tsirkin         int r = client->sync_dirty_bitmap(client, start, end);
1724f6f3fbcaSMichael S. Tsirkin         if (r < 0)
1725f6f3fbcaSMichael S. Tsirkin             return r;
1726f6f3fbcaSMichael S. Tsirkin     }
1727f6f3fbcaSMichael S. Tsirkin     return 0;
1728f6f3fbcaSMichael S. Tsirkin }
1729f6f3fbcaSMichael S. Tsirkin 
1730f6f3fbcaSMichael S. Tsirkin static int cpu_notify_migration_log(int enable)
1731f6f3fbcaSMichael S. Tsirkin {
1732f6f3fbcaSMichael S. Tsirkin     CPUPhysMemoryClient *client;
1733f6f3fbcaSMichael S. Tsirkin     QLIST_FOREACH(client, &memory_client_list, list) {
1734f6f3fbcaSMichael S. Tsirkin         int r = client->migration_log(client, enable);
1735f6f3fbcaSMichael S. Tsirkin         if (r < 0)
1736f6f3fbcaSMichael S. Tsirkin             return r;
1737f6f3fbcaSMichael S. Tsirkin     }
1738f6f3fbcaSMichael S. Tsirkin     return 0;
1739f6f3fbcaSMichael S. Tsirkin }
1740f6f3fbcaSMichael S. Tsirkin 
/* Recursively walk one level of the physical page table, invoking
   client->set_memory() for every populated leaf page.
   NOTE(review): the leaf call passes pd[i].region_offset as the start
   address; presumably this should be the guest physical address of the
   page -- verify against CPUPhysMemoryClient implementations. */
static void phys_page_for_each_1(CPUPhysMemoryClient *client,
                                 int level, void **lp)
{
    int i;

    if (*lp == NULL) {
        return;
    }
    if (level == 0) {
        /* leaf: an array of PhysPageDesc entries */
        PhysPageDesc *pd = *lp;
        for (i = 0; i < L2_SIZE; ++i) {
            if (pd[i].phys_offset != IO_MEM_UNASSIGNED) {
                client->set_memory(client, pd[i].region_offset,
                                   TARGET_PAGE_SIZE, pd[i].phys_offset);
            }
        }
    } else {
        /* interior node: an array of child-table pointers */
        void **pp = *lp;
        for (i = 0; i < L2_SIZE; ++i) {
            phys_page_for_each_1(client, level - 1, pp + i);
        }
    }
}
1764f6f3fbcaSMichael S. Tsirkin 
1765f6f3fbcaSMichael S. Tsirkin static void phys_page_for_each(CPUPhysMemoryClient *client)
1766f6f3fbcaSMichael S. Tsirkin {
17675cd2c5b6SRichard Henderson     int i;
17685cd2c5b6SRichard Henderson     for (i = 0; i < P_L1_SIZE; ++i) {
17695cd2c5b6SRichard Henderson         phys_page_for_each_1(client, P_L1_SHIFT / L2_BITS - 1,
17705cd2c5b6SRichard Henderson                              l1_phys_map + 1);
1771f6f3fbcaSMichael S. Tsirkin     }
1772f6f3fbcaSMichael S. Tsirkin }
1773f6f3fbcaSMichael S. Tsirkin 
/* Register @client and immediately replay all existing physical
   mappings to it so it starts with a complete view. */
void cpu_register_phys_memory_client(CPUPhysMemoryClient *client)
{
    QLIST_INSERT_HEAD(&memory_client_list, client, list);
    phys_page_for_each(client);
}
1779f6f3fbcaSMichael S. Tsirkin 
/* Unlink @client from the notification list; the caller retains
   ownership of the client structure. */
void cpu_unregister_phys_memory_client(CPUPhysMemoryClient *client)
{
    QLIST_REMOVE(client, list);
}
1784f6f3fbcaSMichael S. Tsirkin #endif
1785f6f3fbcaSMichael S. Tsirkin 
/* Return 1 when the first @n characters of @s1 equal the whole of the
   NUL-terminated string @s2, 0 otherwise. */
static int cmp1(const char *s1, int n, const char *s2)
{
    size_t expected = strlen(s2);

    if (expected != (size_t)n) {
        return 0;
    }
    return memcmp(s1, s2, expected) == 0;
}
1792f193c797Sbellard 
/* takes a comma separated list of log masks. Return 0 if error.
   "all" selects every category; any unknown name makes the whole call
   fail with 0. */
int cpu_str_to_log_mask(const char *str)
{
    const CPULogItem *item;
    int mask;
    const char *p, *p1;

    p = str;
    mask = 0;
    for(;;) {
        p1 = strchr(p, ',');
        if (!p1)
            p1 = p + strlen(p);   /* last item: runs to end of string */
        if(cmp1(p,p1-p,"all")) {
            /* OR in every category's mask */
            for(item = cpu_log_items; item->mask != 0; item++) {
                mask |= item->mask;
            }
        } else {
            for(item = cpu_log_items; item->mask != 0; item++) {
                if (cmp1(p, p1 - p, item->name))
                    goto found;
            }
            return 0;   /* unknown category name */
        }
    found:
        /* after the "all" branch, item is the zero-mask sentinel, so
           this OR is a harmless no-op in that case */
        mask |= item->mask;
        if (*p1 != ',')
            break;
        p = p1 + 1;
    }
    return mask;
}
1825ea041c0eSbellard 
/* Report a fatal emulator error: print the formatted message and CPU
   state to stderr (and to the log when enabled), then abort().
   Never returns. */
void cpu_abort(CPUState *env, const char *fmt, ...)
{
    va_list ap;
    va_list ap2;

    va_start(ap, fmt);
    /* the argument list is consumed twice (stderr + log), so copy it */
    va_copy(ap2, ap);
    fprintf(stderr, "qemu: fatal: ");
    vfprintf(stderr, fmt, ap);
    fprintf(stderr, "\n");
#ifdef TARGET_I386
    cpu_dump_state(env, stderr, fprintf, X86_DUMP_FPU | X86_DUMP_CCOP);
#else
    cpu_dump_state(env, stderr, fprintf, 0);
#endif
    if (qemu_log_enabled()) {
        qemu_log("qemu: fatal: ");
        qemu_log_vprintf(fmt, ap2);
        qemu_log("\n");
#ifdef TARGET_I386
        log_cpu_state(env, X86_DUMP_FPU | X86_DUMP_CCOP);
#else
        log_cpu_state(env, 0);
#endif
        qemu_log_flush();
        qemu_log_close();
    }
    va_end(ap2);
    va_end(ap);
#if defined(CONFIG_USER_ONLY)
    {
        /* restore default SIGABRT handling so abort() is not caught by a
           handler the guest installed */
        struct sigaction act;
        sigfillset(&act.sa_mask);
        act.sa_handler = SIG_DFL;
        sigaction(SIGABRT, &act, NULL);
    }
#endif
    abort();
}
18657501267eSbellard 
1866c5be9f08Sths CPUState *cpu_copy(CPUState *env)
1867c5be9f08Sths {
186801ba9816Sths     CPUState *new_env = cpu_init(env->cpu_model_str);
1869c5be9f08Sths     CPUState *next_cpu = new_env->next_cpu;
1870c5be9f08Sths     int cpu_index = new_env->cpu_index;
18715a38f081Saliguori #if defined(TARGET_HAS_ICE)
18725a38f081Saliguori     CPUBreakpoint *bp;
18735a38f081Saliguori     CPUWatchpoint *wp;
18745a38f081Saliguori #endif
18755a38f081Saliguori 
1876c5be9f08Sths     memcpy(new_env, env, sizeof(CPUState));
18775a38f081Saliguori 
18785a38f081Saliguori     /* Preserve chaining and index. */
1879c5be9f08Sths     new_env->next_cpu = next_cpu;
1880c5be9f08Sths     new_env->cpu_index = cpu_index;
18815a38f081Saliguori 
18825a38f081Saliguori     /* Clone all break/watchpoints.
18835a38f081Saliguori        Note: Once we support ptrace with hw-debug register access, make sure
18845a38f081Saliguori        BP_CPU break/watchpoints are handled correctly on clone. */
188572cf2d4fSBlue Swirl     QTAILQ_INIT(&env->breakpoints);
188672cf2d4fSBlue Swirl     QTAILQ_INIT(&env->watchpoints);
18875a38f081Saliguori #if defined(TARGET_HAS_ICE)
188872cf2d4fSBlue Swirl     QTAILQ_FOREACH(bp, &env->breakpoints, entry) {
18895a38f081Saliguori         cpu_breakpoint_insert(new_env, bp->pc, bp->flags, NULL);
18905a38f081Saliguori     }
189172cf2d4fSBlue Swirl     QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
18925a38f081Saliguori         cpu_watchpoint_insert(new_env, wp->vaddr, (~wp->len_mask) + 1,
18935a38f081Saliguori                               wp->flags, NULL);
18945a38f081Saliguori     }
18955a38f081Saliguori #endif
18965a38f081Saliguori 
1897c5be9f08Sths     return new_env;
1898c5be9f08Sths }
1899c5be9f08Sths 
19000124311eSbellard #if !defined(CONFIG_USER_ONLY)
19010124311eSbellard 
19025c751e99Sedgar_igl static inline void tlb_flush_jmp_cache(CPUState *env, target_ulong addr)
19035c751e99Sedgar_igl {
19045c751e99Sedgar_igl     unsigned int i;
19055c751e99Sedgar_igl 
19065c751e99Sedgar_igl     /* Discard jump cache entries for any tb which might potentially
19075c751e99Sedgar_igl        overlap the flushed page.  */
19085c751e99Sedgar_igl     i = tb_jmp_cache_hash_page(addr - TARGET_PAGE_SIZE);
19095c751e99Sedgar_igl     memset (&env->tb_jmp_cache[i], 0,
19105c751e99Sedgar_igl             TB_JMP_PAGE_SIZE * sizeof(TranslationBlock *));
19115c751e99Sedgar_igl 
19125c751e99Sedgar_igl     i = tb_jmp_cache_hash_page(addr);
19135c751e99Sedgar_igl     memset (&env->tb_jmp_cache[i], 0,
19145c751e99Sedgar_igl             TB_JMP_PAGE_SIZE * sizeof(TranslationBlock *));
19155c751e99Sedgar_igl }
19165c751e99Sedgar_igl 
/* A TLB entry with all address fields invalid (-1); assigned wholesale to
   wipe an entry in tlb_flush() and tlb_flush_entry(). */
static CPUTLBEntry s_cputlb_empty_entry = {
    .addr_read  = -1,
    .addr_write = -1,
    .addr_code  = -1,
    .addend     = -1,
};
192308738984SIgor Kovalenko 
1924ee8b7021Sbellard /* NOTE: if flush_global is true, also flush global entries (not
1925ee8b7021Sbellard    implemented yet) */
1926ee8b7021Sbellard void tlb_flush(CPUState *env, int flush_global)
192733417e70Sbellard {
192833417e70Sbellard     int i;
19290124311eSbellard 
19309fa3e853Sbellard #if defined(DEBUG_TLB)
19319fa3e853Sbellard     printf("tlb_flush:\n");
19329fa3e853Sbellard #endif
19330124311eSbellard     /* must reset current TB so that interrupts cannot modify the
19340124311eSbellard        links while we are modifying them */
19350124311eSbellard     env->current_tb = NULL;
19360124311eSbellard 
193733417e70Sbellard     for(i = 0; i < CPU_TLB_SIZE; i++) {
1938cfde4bd9SIsaku Yamahata         int mmu_idx;
1939cfde4bd9SIsaku Yamahata         for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) {
194008738984SIgor Kovalenko             env->tlb_table[mmu_idx][i] = s_cputlb_empty_entry;
1941cfde4bd9SIsaku Yamahata         }
194233417e70Sbellard     }
19439fa3e853Sbellard 
19448a40a180Sbellard     memset (env->tb_jmp_cache, 0, TB_JMP_CACHE_SIZE * sizeof (void *));
19459fa3e853Sbellard 
1946d4c430a8SPaul Brook     env->tlb_flush_addr = -1;
1947d4c430a8SPaul Brook     env->tlb_flush_mask = 0;
1948e3db7226Sbellard     tlb_flush_count++;
194933417e70Sbellard }
195033417e70Sbellard 
1951274da6b2Sbellard static inline void tlb_flush_entry(CPUTLBEntry *tlb_entry, target_ulong addr)
195261382a50Sbellard {
195384b7b8e7Sbellard     if (addr == (tlb_entry->addr_read &
195484b7b8e7Sbellard                  (TARGET_PAGE_MASK | TLB_INVALID_MASK)) ||
195584b7b8e7Sbellard         addr == (tlb_entry->addr_write &
195684b7b8e7Sbellard                  (TARGET_PAGE_MASK | TLB_INVALID_MASK)) ||
195784b7b8e7Sbellard         addr == (tlb_entry->addr_code &
195884b7b8e7Sbellard                  (TARGET_PAGE_MASK | TLB_INVALID_MASK))) {
195908738984SIgor Kovalenko         *tlb_entry = s_cputlb_empty_entry;
196084b7b8e7Sbellard     }
196161382a50Sbellard }
196261382a50Sbellard 
/* Flush, in every MMU mode, the single TLB slot that can map ADDR, plus the
   jump-cache entries for translations that may overlap the page. */
void tlb_flush_page(CPUState *env, target_ulong addr)
{
    int i;
    int mmu_idx;

#if defined(DEBUG_TLB)
    printf("tlb_flush_page: " TARGET_FMT_lx "\n", addr);
#endif
    /* Check if we need to flush due to large pages.  */
    if ((addr & env->tlb_flush_mask) == env->tlb_flush_addr) {
#if defined(DEBUG_TLB)
        printf("tlb_flush_page: forced full flush ("
               TARGET_FMT_lx "/" TARGET_FMT_lx ")\n",
               env->tlb_flush_addr, env->tlb_flush_mask);
#endif
        /* ADDR lies inside a tracked large-page region; a single-entry
           flush cannot cover it, so fall back to a full flush. */
        tlb_flush(env, 1);
        return;
    }
    /* must reset current TB so that interrupts cannot modify the
       links while we are modifying them */
    env->current_tb = NULL;

    addr &= TARGET_PAGE_MASK;
    /* Direct-mapped TLB: only one slot per mode can hold this page. */
    i = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
    for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++)
        tlb_flush_entry(&env->tlb_table[mmu_idx][i], addr);

    tlb_flush_jmp_cache(env, addr);
}
19929fa3e853Sbellard 
/* update the TLBs so that writes to code in the virtual page 'addr'
   can be detected */
static void tlb_protect_code(ram_addr_t ram_addr)
{
    /* Clearing CODE_DIRTY_FLAG for the page forces subsequent writes
       through the slow path, where self-modifying code is caught. */
    cpu_physical_memory_reset_dirty(ram_addr,
                                    ram_addr + TARGET_PAGE_SIZE,
                                    CODE_DIRTY_FLAG);
}
20019fa3e853Sbellard 
/* update the TLB so that writes in physical page 'phys_addr' are no longer
   tested for self modifying code */
static void tlb_unprotect_code_phys(CPUState *env, ram_addr_t ram_addr,
                                    target_ulong vaddr)
{
    /* NOTE(review): env and vaddr are unused here; apparently kept for
       interface symmetry with other tlb_* helpers. */
    cpu_physical_memory_set_dirty_flags(ram_addr, CODE_DIRTY_FLAG);
}
20099fa3e853Sbellard 
20101ccde1cbSbellard static inline void tlb_reset_dirty_range(CPUTLBEntry *tlb_entry,
20111ccde1cbSbellard                                          unsigned long start, unsigned long length)
20121ccde1cbSbellard {
20131ccde1cbSbellard     unsigned long addr;
201484b7b8e7Sbellard     if ((tlb_entry->addr_write & ~TARGET_PAGE_MASK) == IO_MEM_RAM) {
201584b7b8e7Sbellard         addr = (tlb_entry->addr_write & TARGET_PAGE_MASK) + tlb_entry->addend;
20161ccde1cbSbellard         if ((addr - start) < length) {
20170f459d16Spbrook             tlb_entry->addr_write = (tlb_entry->addr_write & TARGET_PAGE_MASK) | TLB_NOTDIRTY;
20181ccde1cbSbellard         }
20191ccde1cbSbellard     }
20201ccde1cbSbellard }
20211ccde1cbSbellard 
/* Note: start and end must be within the same ram block.  */
void cpu_physical_memory_reset_dirty(ram_addr_t start, ram_addr_t end,
                                     int dirty_flags)
{
    CPUState *env;
    unsigned long length, start1;
    int i;

    start &= TARGET_PAGE_MASK;
    end = TARGET_PAGE_ALIGN(end);

    length = end - start;
    if (length == 0)
        return;
    /* Clear the requested dirty bits in the global bitmap first. */
    cpu_physical_memory_mask_dirty_range(start, length, dirty_flags);

    /* we modify the TLB cache so that the dirty bit will be set again
       when accessing the range */
    start1 = (unsigned long)qemu_get_ram_ptr(start);
    /* Check that we don't span multiple blocks - this breaks the
       address comparisons below.  */
    if ((unsigned long)qemu_get_ram_ptr(end - 1) - start1
            != (end - 1) - start) {
        abort();
    }

    /* Mark matching write entries of every CPU and MMU mode TLB_NOTDIRTY. */
    for(env = first_cpu; env != NULL; env = env->next_cpu) {
        int mmu_idx;
        for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) {
            for(i = 0; i < CPU_TLB_SIZE; i++)
                tlb_reset_dirty_range(&env->tlb_table[mmu_idx][i],
                                      start1, length);
        }
    }
}
20571ccde1cbSbellard 
205874576198Saliguori int cpu_physical_memory_set_dirty_tracking(int enable)
205974576198Saliguori {
2060f6f3fbcaSMichael S. Tsirkin     int ret = 0;
206174576198Saliguori     in_migration = enable;
2062f6f3fbcaSMichael S. Tsirkin     ret = cpu_notify_migration_log(!!enable);
2063f6f3fbcaSMichael S. Tsirkin     return ret;
206474576198Saliguori }
206574576198Saliguori 
/* Return the current dirty-tracking (migration) state as set by
   cpu_physical_memory_set_dirty_tracking(). */
int cpu_physical_memory_get_dirty_tracking(void)
{
    return in_migration;
}
207074576198Saliguori 
2071c227f099SAnthony Liguori int cpu_physical_sync_dirty_bitmap(target_phys_addr_t start_addr,
2072c227f099SAnthony Liguori                                    target_phys_addr_t end_addr)
20732bec46dcSaliguori {
20747b8f3b78SMichael S. Tsirkin     int ret;
2075151f7749SJan Kiszka 
2076f6f3fbcaSMichael S. Tsirkin     ret = cpu_notify_sync_dirty_bitmap(start_addr, end_addr);
2077151f7749SJan Kiszka     return ret;
20782bec46dcSaliguori }
20792bec46dcSaliguori 
/* Re-check the global dirty bitmap for a RAM-backed TLB entry; if its page
   is clean, flag the entry TLB_NOTDIRTY so writes trap again. */
static inline void tlb_update_dirty(CPUTLBEntry *tlb_entry)
{
    ram_addr_t ram_addr;
    void *p;

    if ((tlb_entry->addr_write & ~TARGET_PAGE_MASK) == IO_MEM_RAM) {
        /* Recover the host pointer from the entry's addend... */
        p = (void *)(unsigned long)((tlb_entry->addr_write & TARGET_PAGE_MASK)
            + tlb_entry->addend);
        /* ...and map it back to a ram_addr_t to consult the bitmap. */
        ram_addr = qemu_ram_addr_from_host(p);
        if (!cpu_physical_memory_is_dirty(ram_addr)) {
            tlb_entry->addr_write |= TLB_NOTDIRTY;
        }
    }
}
20943a7d929eSbellard 
20953a7d929eSbellard /* update the TLB according to the current state of the dirty bits */
20963a7d929eSbellard void cpu_tlb_update_dirty(CPUState *env)
20973a7d929eSbellard {
20983a7d929eSbellard     int i;
2099cfde4bd9SIsaku Yamahata     int mmu_idx;
2100cfde4bd9SIsaku Yamahata     for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) {
21013a7d929eSbellard         for(i = 0; i < CPU_TLB_SIZE; i++)
2102cfde4bd9SIsaku Yamahata             tlb_update_dirty(&env->tlb_table[mmu_idx][i]);
2103cfde4bd9SIsaku Yamahata     }
21043a7d929eSbellard }
21053a7d929eSbellard 
/* If TLB_ENTRY maps exactly VADDR and differs only by the TLB_NOTDIRTY
   flag, clear the flag so writes stop taking the slow path. */
static inline void tlb_set_dirty1(CPUTLBEntry *tlb_entry, target_ulong vaddr)
{
    if (tlb_entry->addr_write == (vaddr | TLB_NOTDIRTY))
        tlb_entry->addr_write = vaddr;
}
21111ccde1cbSbellard 
21120f459d16Spbrook /* update the TLB corresponding to virtual page vaddr
21130f459d16Spbrook    so that it is no longer dirty */
21140f459d16Spbrook static inline void tlb_set_dirty(CPUState *env, target_ulong vaddr)
21151ccde1cbSbellard {
21161ccde1cbSbellard     int i;
2117cfde4bd9SIsaku Yamahata     int mmu_idx;
21181ccde1cbSbellard 
21190f459d16Spbrook     vaddr &= TARGET_PAGE_MASK;
21201ccde1cbSbellard     i = (vaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
2121cfde4bd9SIsaku Yamahata     for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++)
2122cfde4bd9SIsaku Yamahata         tlb_set_dirty1(&env->tlb_table[mmu_idx][i], vaddr);
21231ccde1cbSbellard }
21241ccde1cbSbellard 
/* Our TLB does not support large pages, so remember the area covered by
   large pages and trigger a full TLB flush if these are invalidated.  */
static void tlb_add_large_page(CPUState *env, target_ulong vaddr,
                               target_ulong size)
{
    /* size is assumed to be a power of two (mask of its low bits). */
    target_ulong mask = ~(size - 1);

    if (env->tlb_flush_addr == (target_ulong)-1) {
        /* No region tracked yet: start with exactly this page's extent. */
        env->tlb_flush_addr = vaddr & mask;
        env->tlb_flush_mask = mask;
        return;
    }
    /* Extend the existing region to include the new page.
       This is a compromise between unnecessary flushes and the cost
       of maintaining a full variable size TLB.  */
    mask &= env->tlb_flush_mask;
    while (((env->tlb_flush_addr ^ vaddr) & mask) != 0) {
        mask <<= 1;
    }
    env->tlb_flush_addr &= mask;
    env->tlb_flush_mask = mask;
}
2147d4c430a8SPaul Brook 
/* Add a new TLB entry. At most one entry for a given virtual address
   is permitted. Only a single TARGET_PAGE_SIZE region is mapped, the
   supplied size is only used by tlb_flush_page.  */
void tlb_set_page(CPUState *env, target_ulong vaddr,
                  target_phys_addr_t paddr, int prot,
                  int mmu_idx, target_ulong size)
{
    PhysPageDesc *p;
    unsigned long pd;
    unsigned int index;
    target_ulong address;
    target_ulong code_address;
    unsigned long addend;
    CPUTLBEntry *te;
    CPUWatchpoint *wp;
    target_phys_addr_t iotlb;

    assert(size >= TARGET_PAGE_SIZE);
    if (size != TARGET_PAGE_SIZE) {
        /* Remember large-page regions so tlb_flush_page can fall back
           to a full flush when needed. */
        tlb_add_large_page(env, vaddr, size);
    }
    /* Resolve the physical page descriptor; unmapped pages behave as
       unassigned I/O. */
    p = phys_page_find(paddr >> TARGET_PAGE_BITS);
    if (!p) {
        pd = IO_MEM_UNASSIGNED;
    } else {
        pd = p->phys_offset;
    }
#if defined(DEBUG_TLB)
    printf("tlb_set_page: vaddr=" TARGET_FMT_lx " paddr=0x%08x prot=%x idx=%d smmu=%d pd=0x%08lx\n",
           vaddr, (int)paddr, prot, mmu_idx, is_softmmu, pd);
#endif

    address = vaddr;
    if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM && !(pd & IO_MEM_ROMD)) {
        /* IO memory case (romd handled later) */
        address |= TLB_MMIO;
    }
    addend = (unsigned long)qemu_get_ram_ptr(pd & TARGET_PAGE_MASK);
    if ((pd & ~TARGET_PAGE_MASK) <= IO_MEM_ROM) {
        /* Normal RAM.  */
        iotlb = pd & TARGET_PAGE_MASK;
        if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_RAM)
            iotlb |= IO_MEM_NOTDIRTY;
        else
            iotlb |= IO_MEM_ROM;
    } else {
        /* IO handlers are currently passed a physical address.
           It would be nice to pass an offset from the base address
           of that region.  This would avoid having to special case RAM,
           and avoid full address decoding in every device.
           We can't use the high bits of pd for this because
           IO_MEM_ROMD uses these as a ram address.  */
        iotlb = (pd & ~TARGET_PAGE_MASK);
        if (p) {
            iotlb += p->region_offset;
        } else {
            iotlb += paddr;
        }
    }

    code_address = address;
    /* Make accesses to pages with watchpoints go via the
       watchpoint trap routines.  */
    QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
        if (vaddr == (wp->vaddr & TARGET_PAGE_MASK)) {
            /* Avoid trapping reads of pages with a write breakpoint. */
            if ((prot & PAGE_WRITE) || (wp->flags & BP_MEM_READ)) {
                iotlb = io_mem_watch + paddr;
                address |= TLB_MMIO;
                break;
            }
        }
    }

    /* Fill the direct-mapped slot for this page in the chosen MMU mode. */
    index = (vaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
    env->iotlb[mmu_idx][index] = iotlb - vaddr;
    te = &env->tlb_table[mmu_idx][index];
    /* addend is stored relative to vaddr so that haddr = vaddr + addend. */
    te->addend = addend - vaddr;
    if (prot & PAGE_READ) {
        te->addr_read = address;
    } else {
        te->addr_read = -1;
    }

    if (prot & PAGE_EXEC) {
        te->addr_code = code_address;
    } else {
        te->addr_code = -1;
    }
    if (prot & PAGE_WRITE) {
        if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_ROM ||
            (pd & IO_MEM_ROMD)) {
            /* Write access calls the I/O callback.  */
            te->addr_write = address | TLB_MMIO;
        } else if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_RAM &&
                   !cpu_physical_memory_is_dirty(pd)) {
            te->addr_write = address | TLB_NOTDIRTY;
        } else {
            te->addr_write = address;
        }
    } else {
        te->addr_write = -1;
    }
}
22529fa3e853Sbellard 
22530124311eSbellard #else
22540124311eSbellard 
/* User-mode emulation has no softmmu TLB: flushing is a no-op. */
void tlb_flush(CPUState *env, int flush_global)
{
}
22580124311eSbellard 
/* User-mode emulation has no softmmu TLB: flushing is a no-op. */
void tlb_flush_page(CPUState *env, target_ulong addr)
{
}
22620124311eSbellard 
/*
 * Walks guest process memory "regions" one by one
 * and calls callback function 'fn' for each region.
 */

/* Accumulator carried through the recursive page-table walk:
   'start'/'prot' describe the currently open region; start == -1ul
   means no region is open. */
struct walk_memory_regions_data
{
    walk_memory_regions_fn fn;
    void *priv;
    unsigned long start;
    int prot;
};
22759fa3e853Sbellard 
22765cd2c5b6SRichard Henderson static int walk_memory_regions_end(struct walk_memory_regions_data *data,
2277b480d9b7SPaul Brook                                    abi_ulong end, int new_prot)
22785cd2c5b6SRichard Henderson {
22795cd2c5b6SRichard Henderson     if (data->start != -1ul) {
22805cd2c5b6SRichard Henderson         int rc = data->fn(data->priv, data->start, end, data->prot);
22815cd2c5b6SRichard Henderson         if (rc != 0) {
22825cd2c5b6SRichard Henderson             return rc;
22835cd2c5b6SRichard Henderson         }
22845cd2c5b6SRichard Henderson     }
2285edf8e2afSMika Westerberg 
22865cd2c5b6SRichard Henderson     data->start = (new_prot ? end : -1ul);
22875cd2c5b6SRichard Henderson     data->prot = new_prot;
22885cd2c5b6SRichard Henderson 
22895cd2c5b6SRichard Henderson     return 0;
229033417e70Sbellard }
22915cd2c5b6SRichard Henderson 
/* Recursive helper: walk one level of the page table rooted at *LP, whose
   entries cover addresses starting at BASE.  LEVEL == 0 means *LP is a leaf
   PageDesc array; otherwise it is a table of child pointers.  Returns the
   first non-zero callback result, or 0. */
static int walk_memory_regions_1(struct walk_memory_regions_data *data,
                                 abi_ulong base, int level, void **lp)
{
    abi_ulong pa;
    int i, rc;

    if (*lp == NULL) {
        /* Hole in the page table: terminate any open region at BASE. */
        return walk_memory_regions_end(data, base, 0);
    }

    if (level == 0) {
        PageDesc *pd = *lp;
        for (i = 0; i < L2_SIZE; ++i) {
            int prot = pd[i].flags;

            pa = base | (i << TARGET_PAGE_BITS);
            if (prot != data->prot) {
                /* Protection changed: flush the region ending here. */
                rc = walk_memory_regions_end(data, pa, prot);
                if (rc != 0) {
                    return rc;
                }
            }
        }
    } else {
        void **pp = *lp;
        for (i = 0; i < L2_SIZE; ++i) {
            pa = base | ((abi_ulong)i <<
                (TARGET_PAGE_BITS + L2_BITS * level));
            rc = walk_memory_regions_1(data, pa, level - 1, pp + i);
            if (rc != 0) {
                return rc;
            }
        }
    }

    return 0;
}
23295cd2c5b6SRichard Henderson 
23305cd2c5b6SRichard Henderson int walk_memory_regions(void *priv, walk_memory_regions_fn fn)
23315cd2c5b6SRichard Henderson {
23325cd2c5b6SRichard Henderson     struct walk_memory_regions_data data;
23335cd2c5b6SRichard Henderson     unsigned long i;
23345cd2c5b6SRichard Henderson 
23355cd2c5b6SRichard Henderson     data.fn = fn;
23365cd2c5b6SRichard Henderson     data.priv = priv;
23375cd2c5b6SRichard Henderson     data.start = -1ul;
23385cd2c5b6SRichard Henderson     data.prot = 0;
23395cd2c5b6SRichard Henderson 
23405cd2c5b6SRichard Henderson     for (i = 0; i < V_L1_SIZE; i++) {
2341b480d9b7SPaul Brook         int rc = walk_memory_regions_1(&data, (abi_ulong)i << V_L1_SHIFT,
23425cd2c5b6SRichard Henderson                                        V_L1_SHIFT / L2_BITS - 1, l1_map + i);
23435cd2c5b6SRichard Henderson         if (rc != 0) {
23445cd2c5b6SRichard Henderson             return rc;
23455cd2c5b6SRichard Henderson         }
23465cd2c5b6SRichard Henderson     }
23475cd2c5b6SRichard Henderson 
23485cd2c5b6SRichard Henderson     return walk_memory_regions_end(&data, 0, 0);
2349edf8e2afSMika Westerberg }
2350edf8e2afSMika Westerberg 
2351b480d9b7SPaul Brook static int dump_region(void *priv, abi_ulong start,
2352b480d9b7SPaul Brook     abi_ulong end, unsigned long prot)
2353edf8e2afSMika Westerberg {
2354edf8e2afSMika Westerberg     FILE *f = (FILE *)priv;
2355edf8e2afSMika Westerberg 
2356b480d9b7SPaul Brook     (void) fprintf(f, TARGET_ABI_FMT_lx"-"TARGET_ABI_FMT_lx
2357b480d9b7SPaul Brook         " "TARGET_ABI_FMT_lx" %c%c%c\n",
2358edf8e2afSMika Westerberg         start, end, end - start,
2359edf8e2afSMika Westerberg         ((prot & PAGE_READ) ? 'r' : '-'),
2360edf8e2afSMika Westerberg         ((prot & PAGE_WRITE) ? 'w' : '-'),
2361edf8e2afSMika Westerberg         ((prot & PAGE_EXEC) ? 'x' : '-'));
2362edf8e2afSMika Westerberg 
2363edf8e2afSMika Westerberg     return (0);
2364edf8e2afSMika Westerberg }
2365edf8e2afSMika Westerberg 
2366edf8e2afSMika Westerberg /* dump memory mappings */
2367edf8e2afSMika Westerberg void page_dump(FILE *f)
2368edf8e2afSMika Westerberg {
2369edf8e2afSMika Westerberg     (void) fprintf(f, "%-8s %-8s %-8s %s\n",
2370edf8e2afSMika Westerberg             "start", "end", "size", "prot");
2371edf8e2afSMika Westerberg     walk_memory_regions(f, dump_region);
23729fa3e853Sbellard }
23739fa3e853Sbellard 
237453a5960aSpbrook int page_get_flags(target_ulong address)
23759fa3e853Sbellard {
23769fa3e853Sbellard     PageDesc *p;
23779fa3e853Sbellard 
23789fa3e853Sbellard     p = page_find(address >> TARGET_PAGE_BITS);
23799fa3e853Sbellard     if (!p)
23809fa3e853Sbellard         return 0;
23819fa3e853Sbellard     return p->flags;
23829fa3e853Sbellard }
23839fa3e853Sbellard 
/* Modify the flags of a page and invalidate the code if necessary.
   The flag PAGE_WRITE_ORG is positioned automatically depending
   on PAGE_WRITE.  The mmap_lock should already be held.  */
void page_set_flags(target_ulong start, target_ulong end, int flags)
{
    target_ulong addr, len;

    /* This function should never be called with addresses outside the
       guest address space.  If this assert fires, it probably indicates
       a missing call to h2g_valid.  */
#if TARGET_ABI_BITS > L1_MAP_ADDR_SPACE_BITS
    assert(end < ((abi_ulong)1 << L1_MAP_ADDR_SPACE_BITS));
#endif
    assert(start < end);

    start = start & TARGET_PAGE_MASK;
    end = TARGET_PAGE_ALIGN(end);

    if (flags & PAGE_WRITE) {
        /* Remember the original writability so page_unprotect can
           restore it after write-protecting translated code. */
        flags |= PAGE_WRITE_ORG;
    }

    for (addr = start, len = end - start;
         len != 0;
         len -= TARGET_PAGE_SIZE, addr += TARGET_PAGE_SIZE) {
        /* Allocate the descriptor on demand (second arg = 1). */
        PageDesc *p = page_find_alloc(addr >> TARGET_PAGE_BITS, 1);

        /* If the write protection bit is set, then we invalidate
           the code inside.  */
        if (!(p->flags & PAGE_WRITE) &&
            (flags & PAGE_WRITE) &&
            p->first_tb) {
            tb_invalidate_phys_page(addr, 0, NULL);
        }
        p->flags = flags;
    }
}
24219fa3e853Sbellard 
24223d97b40bSths int page_check_range(target_ulong start, target_ulong len, int flags)
24233d97b40bSths {
24243d97b40bSths     PageDesc *p;
24253d97b40bSths     target_ulong end;
24263d97b40bSths     target_ulong addr;
24273d97b40bSths 
2428376a7909SRichard Henderson     /* This function should never be called with addresses outside the
2429376a7909SRichard Henderson        guest address space.  If this assert fires, it probably indicates
2430376a7909SRichard Henderson        a missing call to h2g_valid.  */
2431338e9e6cSBlue Swirl #if TARGET_ABI_BITS > L1_MAP_ADDR_SPACE_BITS
2432338e9e6cSBlue Swirl     assert(start < ((abi_ulong)1 << L1_MAP_ADDR_SPACE_BITS));
2433376a7909SRichard Henderson #endif
2434376a7909SRichard Henderson 
24353e0650a9SRichard Henderson     if (len == 0) {
24363e0650a9SRichard Henderson         return 0;
24373e0650a9SRichard Henderson     }
2438376a7909SRichard Henderson     if (start + len - 1 < start) {
2439376a7909SRichard Henderson         /* We've wrapped around.  */
244055f280c9Sbalrog         return -1;
2441376a7909SRichard Henderson     }
244255f280c9Sbalrog 
24433d97b40bSths     end = TARGET_PAGE_ALIGN(start+len); /* must do before we loose bits in the next step */
24443d97b40bSths     start = start & TARGET_PAGE_MASK;
24453d97b40bSths 
2446376a7909SRichard Henderson     for (addr = start, len = end - start;
2447376a7909SRichard Henderson          len != 0;
2448376a7909SRichard Henderson          len -= TARGET_PAGE_SIZE, addr += TARGET_PAGE_SIZE) {
24493d97b40bSths         p = page_find(addr >> TARGET_PAGE_BITS);
24503d97b40bSths         if( !p )
24513d97b40bSths             return -1;
24523d97b40bSths         if( !(p->flags & PAGE_VALID) )
24533d97b40bSths             return -1;
24543d97b40bSths 
2455dae3270cSbellard         if ((flags & PAGE_READ) && !(p->flags & PAGE_READ))
24563d97b40bSths             return -1;
2457dae3270cSbellard         if (flags & PAGE_WRITE) {
2458dae3270cSbellard             if (!(p->flags & PAGE_WRITE_ORG))
24593d97b40bSths                 return -1;
2460dae3270cSbellard             /* unprotect the page if it was put read-only because it
2461dae3270cSbellard                contains translated code */
2462dae3270cSbellard             if (!(p->flags & PAGE_WRITE)) {
2463dae3270cSbellard                 if (!page_unprotect(addr, 0, NULL))
2464dae3270cSbellard                     return -1;
2465dae3270cSbellard             }
2466dae3270cSbellard             return 0;
2467dae3270cSbellard         }
24683d97b40bSths     }
24693d97b40bSths     return 0;
24703d97b40bSths }
24713d97b40bSths 
/* called from signal handler: invalidate the code and unprotect the
   page. Return TRUE if the fault was successfully handled. */
int page_unprotect(target_ulong address, unsigned long pc, void *puc)
{
    unsigned int prot;
    PageDesc *p;
    target_ulong host_start, host_end, addr;

    /* Technically this isn't safe inside a signal handler.  However we
       know this only ever happens in a synchronous SEGV handler, so in
       practice it seems to be ok.  */
    mmap_lock();

    p = page_find(address >> TARGET_PAGE_BITS);
    if (!p) {
        mmap_unlock();
        return 0;
    }

    /* if the page was really writable, then we change its
       protection back to writable */
    if ((p->flags & PAGE_WRITE_ORG) && !(p->flags & PAGE_WRITE)) {
        /* A host page may span several target pages; restore write
           access for the whole host page and invalidate all of them. */
        host_start = address & qemu_host_page_mask;
        host_end = host_start + qemu_host_page_size;

        prot = 0;
        for (addr = host_start ; addr < host_end ; addr += TARGET_PAGE_SIZE) {
            p = page_find(addr >> TARGET_PAGE_BITS);
            p->flags |= PAGE_WRITE;
            prot |= p->flags;

            /* and since the content will be modified, we must invalidate
               the corresponding translated code. */
            tb_invalidate_phys_page(addr, pc, puc);
#ifdef DEBUG_TB_CHECK
            tb_invalidate_check(addr);
#endif
        }
        /* Apply the union of the target pages' flags to the host page. */
        mprotect((void *)g2h(host_start), qemu_host_page_size,
                 prot & PAGE_BITS);

        mmap_unlock();
        return 1;
    }
    mmap_unlock();
    return 0;
}
25199fa3e853Sbellard 
                  /* No-op stub: in user-mode emulation there is no softmmu TLB whose
                     dirty state would need updating.  */
25206a00d601Sbellard static inline void tlb_set_dirty(CPUState *env,
25216a00d601Sbellard                                  unsigned long addr, target_ulong vaddr)
25221ccde1cbSbellard {
25231ccde1cbSbellard }
25249fa3e853Sbellard #endif /* defined(CONFIG_USER_ONLY) */
252533417e70Sbellard 
2526e2eef170Spbrook #if !defined(CONFIG_USER_ONLY)
25278da3ff18Spbrook 
                  /* Byte offset of 'addr' within its target page; used to index the
                     per-byte tables in subpage_t below.  */
2528c04b2b78SPaul Brook #define SUBPAGE_IDX(addr) ((addr) & ~TARGET_PAGE_MASK)
                  /* A "subpage" models one target page whose contents are backed by
                     several different memory regions.  The two arrays are indexed by
                     SUBPAGE_IDX(addr), i.e. one entry per byte offset in the page.  */
2529c04b2b78SPaul Brook typedef struct subpage_t {
2530c04b2b78SPaul Brook     target_phys_addr_t base;
2531f6405247SRichard Henderson     ram_addr_t sub_io_index[TARGET_PAGE_SIZE];
2532f6405247SRichard Henderson     ram_addr_t region_offset[TARGET_PAGE_SIZE];
2533c04b2b78SPaul Brook } subpage_t;
2534c04b2b78SPaul Brook 
2535c227f099SAnthony Liguori static int subpage_register (subpage_t *mmio, uint32_t start, uint32_t end,
2536c227f099SAnthony Liguori                              ram_addr_t memory, ram_addr_t region_offset);
2537f6405247SRichard Henderson static subpage_t *subpage_init (target_phys_addr_t base, ram_addr_t *phys,
2538f6405247SRichard Henderson                                 ram_addr_t orig_memory,
2539f6405247SRichard Henderson                                 ram_addr_t region_offset);
                  /* Compute the intersection of [start_addr, start_addr + orig_size)
                     with the target page containing 'addr':
                       start_addr2/end_addr2 = inclusive in-page byte bounds,
                       need_subpage          = set to 1 if the region does NOT cover
                                               the whole page (so a subpage is needed).
                     NOTE(review): silently reads 'orig_size' from the caller's scope
                     even though it is not a macro parameter.  */
2540db7b5426Sblueswir1 #define CHECK_SUBPAGE(addr, start_addr, start_addr2, end_addr, end_addr2, \
2541db7b5426Sblueswir1                       need_subpage)                                     \
2542db7b5426Sblueswir1     do {                                                                \
2543db7b5426Sblueswir1         if (addr > start_addr)                                          \
2544db7b5426Sblueswir1             start_addr2 = 0;                                            \
2545db7b5426Sblueswir1         else {                                                          \
2546db7b5426Sblueswir1             start_addr2 = start_addr & ~TARGET_PAGE_MASK;               \
2547db7b5426Sblueswir1             if (start_addr2 > 0)                                        \
2548db7b5426Sblueswir1                 need_subpage = 1;                                       \
2549db7b5426Sblueswir1         }                                                               \
2550db7b5426Sblueswir1                                                                         \
255149e9fba2Sblueswir1         if ((start_addr + orig_size) - addr >= TARGET_PAGE_SIZE)        \
2552db7b5426Sblueswir1             end_addr2 = TARGET_PAGE_SIZE - 1;                           \
2553db7b5426Sblueswir1         else {                                                          \
2554db7b5426Sblueswir1             end_addr2 = (start_addr + orig_size - 1) & ~TARGET_PAGE_MASK; \
2555db7b5426Sblueswir1             if (end_addr2 < TARGET_PAGE_SIZE - 1)                       \
2556db7b5426Sblueswir1                 need_subpage = 1;                                       \
2557db7b5426Sblueswir1         }                                                               \
2558db7b5426Sblueswir1     } while (0)
2559db7b5426Sblueswir1 
25608f2498f9SMichael S. Tsirkin /* register physical memory.
25618f2498f9SMichael S. Tsirkin    For RAM, 'size' must be a multiple of the target page size.
25628f2498f9SMichael S. Tsirkin    If (phys_offset & ~TARGET_PAGE_MASK) != 0, then it is an
25638da3ff18Spbrook    io memory page.  The address used when calling the IO function is
25648da3ff18Spbrook    the offset from the start of the region, plus region_offset.  Both
2565ccbb4d44SStuart Brady    start_addr and region_offset are rounded down to a page boundary
25668da3ff18Spbrook    before calculating this offset.  This should not be a problem unless
25678da3ff18Spbrook    the low bits of start_addr and region_offset differ.  */
2568c227f099SAnthony Liguori void cpu_register_physical_memory_offset(target_phys_addr_t start_addr,
2569c227f099SAnthony Liguori                                          ram_addr_t size,
2570c227f099SAnthony Liguori                                          ram_addr_t phys_offset,
2571c227f099SAnthony Liguori                                          ram_addr_t region_offset)
257233417e70Sbellard {
2573c227f099SAnthony Liguori     target_phys_addr_t addr, end_addr;
257492e873b9Sbellard     PhysPageDesc *p;
25759d42037bSbellard     CPUState *env;
2576c227f099SAnthony Liguori     ram_addr_t orig_size = size;
2577f6405247SRichard Henderson     subpage_t *subpage;
257833417e70Sbellard 
                      /* Notify registered clients (e.g. the KVM accelerator) of the
                         new mapping before installing it.  */
2579f6f3fbcaSMichael S. Tsirkin     cpu_notify_set_memory(start_addr, size, phys_offset);
2580f6f3fbcaSMichael S. Tsirkin 
258167c4d23cSpbrook     if (phys_offset == IO_MEM_UNASSIGNED) {
258267c4d23cSpbrook         region_offset = start_addr;
258367c4d23cSpbrook     }
25848da3ff18Spbrook     region_offset &= TARGET_PAGE_MASK;
                      /* Round the size up to a whole number of target pages.  */
25855fd386f6Sbellard     size = (size + TARGET_PAGE_SIZE - 1) & TARGET_PAGE_MASK;
2586c227f099SAnthony Liguori     end_addr = start_addr + (target_phys_addr_t)size;
258749e9fba2Sblueswir1     for(addr = start_addr; addr != end_addr; addr += TARGET_PAGE_SIZE) {
2588db7b5426Sblueswir1         p = phys_page_find(addr >> TARGET_PAGE_BITS);
2589db7b5426Sblueswir1         if (p && p->phys_offset != IO_MEM_UNASSIGNED) {
                              /* Page already mapped: if the new region only partially
                                 covers it we must go through a subpage.  */
2590c227f099SAnthony Liguori             ram_addr_t orig_memory = p->phys_offset;
2591c227f099SAnthony Liguori             target_phys_addr_t start_addr2, end_addr2;
2592db7b5426Sblueswir1             int need_subpage = 0;
2593db7b5426Sblueswir1 
2594db7b5426Sblueswir1             CHECK_SUBPAGE(addr, start_addr, start_addr2, end_addr, end_addr2,
2595db7b5426Sblueswir1                           need_subpage);
2596f6405247SRichard Henderson             if (need_subpage) {
2597db7b5426Sblueswir1                 if (!(orig_memory & IO_MEM_SUBPAGE)) {
                                      /* Convert the existing full-page mapping into a
                                         subpage that initially mirrors it.  */
2598db7b5426Sblueswir1                     subpage = subpage_init((addr & TARGET_PAGE_MASK),
25998da3ff18Spbrook                                            &p->phys_offset, orig_memory,
26008da3ff18Spbrook                                            p->region_offset);
2601db7b5426Sblueswir1                 } else {
                                      /* Already a subpage: recover its descriptor from
                                         the io-mem opaque table.  */
2602db7b5426Sblueswir1                     subpage = io_mem_opaque[(orig_memory & ~TARGET_PAGE_MASK)
2603db7b5426Sblueswir1                                             >> IO_MEM_SHIFT];
2604db7b5426Sblueswir1                 }
26058da3ff18Spbrook                 subpage_register(subpage, start_addr2, end_addr2, phys_offset,
26068da3ff18Spbrook                                  region_offset);
26078da3ff18Spbrook                 p->region_offset = 0;
2608db7b5426Sblueswir1             } else {
2609db7b5426Sblueswir1                 p->phys_offset = phys_offset;
                                  /* For RAM/ROM pages, advance phys_offset so consecutive
                                     pages map consecutive ram offsets.  */
2610db7b5426Sblueswir1                 if ((phys_offset & ~TARGET_PAGE_MASK) <= IO_MEM_ROM ||
2611db7b5426Sblueswir1                     (phys_offset & IO_MEM_ROMD))
2612db7b5426Sblueswir1                     phys_offset += TARGET_PAGE_SIZE;
2613db7b5426Sblueswir1             }
2614db7b5426Sblueswir1         } else {
                              /* Page not mapped yet: allocate a fresh descriptor.  */
2615108c49b8Sbellard             p = phys_page_find_alloc(addr >> TARGET_PAGE_BITS, 1);
26169fa3e853Sbellard             p->phys_offset = phys_offset;
26178da3ff18Spbrook             p->region_offset = region_offset;
26182a4188a3Sbellard             if ((phys_offset & ~TARGET_PAGE_MASK) <= IO_MEM_ROM ||
26198da3ff18Spbrook                 (phys_offset & IO_MEM_ROMD)) {
262033417e70Sbellard                 phys_offset += TARGET_PAGE_SIZE;
26218da3ff18Spbrook             } else {
2622c227f099SAnthony Liguori                 target_phys_addr_t start_addr2, end_addr2;
2623db7b5426Sblueswir1                 int need_subpage = 0;
2624db7b5426Sblueswir1 
2625db7b5426Sblueswir1                 CHECK_SUBPAGE(addr, start_addr, start_addr2, end_addr,
2626db7b5426Sblueswir1                               end_addr2, need_subpage);
2627db7b5426Sblueswir1 
2628f6405247SRichard Henderson                 if (need_subpage) {
                                      /* IO region covering only part of the page: build a
                                         subpage over IO_MEM_UNASSIGNED and register the
                                         partial range in it.  */
2629db7b5426Sblueswir1                     subpage = subpage_init((addr & TARGET_PAGE_MASK),
26308da3ff18Spbrook                                            &p->phys_offset, IO_MEM_UNASSIGNED,
263167c4d23cSpbrook                                            addr & TARGET_PAGE_MASK);
2632db7b5426Sblueswir1                     subpage_register(subpage, start_addr2, end_addr2,
26338da3ff18Spbrook                                      phys_offset, region_offset);
26348da3ff18Spbrook                     p->region_offset = 0;
2635db7b5426Sblueswir1                 }
2636db7b5426Sblueswir1             }
2637db7b5426Sblueswir1         }
26388da3ff18Spbrook         region_offset += TARGET_PAGE_SIZE;
263933417e70Sbellard     }
26409d42037bSbellard 
26419d42037bSbellard     /* since each CPU stores ram addresses in its TLB cache, we must
26429d42037bSbellard        reset the modified entries */
26439d42037bSbellard     /* XXX: slow ! */
26449d42037bSbellard     for(env = first_cpu; env != NULL; env = env->next_cpu) {
26459d42037bSbellard         tlb_flush(env, 1);
26469d42037bSbellard     }
264733417e70Sbellard }
264833417e70Sbellard 
2649ba863458Sbellard /* XXX: temporary until new memory mapping API */
                  /* Look up the phys_offset recorded for the page containing 'addr';
                     returns IO_MEM_UNASSIGNED when no descriptor exists.  */
2650c227f099SAnthony Liguori ram_addr_t cpu_get_physical_page_desc(target_phys_addr_t addr)
2651ba863458Sbellard {
2652ba863458Sbellard     PhysPageDesc *p;
2653ba863458Sbellard 
2654ba863458Sbellard     p = phys_page_find(addr >> TARGET_PAGE_BITS);
2655ba863458Sbellard     if (!p)
2656ba863458Sbellard         return IO_MEM_UNASSIGNED;
2657ba863458Sbellard     return p->phys_offset;
2658ba863458Sbellard }
2659ba863458Sbellard 
                  /* Thin wrappers around the KVM coalesced-MMIO API; all three are
                     no-ops when KVM is not enabled.  */
2660c227f099SAnthony Liguori void qemu_register_coalesced_mmio(target_phys_addr_t addr, ram_addr_t size)
2661f65ed4c1Saliguori {
2662f65ed4c1Saliguori     if (kvm_enabled())
2663f65ed4c1Saliguori         kvm_coalesce_mmio_region(addr, size);
2664f65ed4c1Saliguori }
2665f65ed4c1Saliguori 
2666c227f099SAnthony Liguori void qemu_unregister_coalesced_mmio(target_phys_addr_t addr, ram_addr_t size)
2667f65ed4c1Saliguori {
2668f65ed4c1Saliguori     if (kvm_enabled())
2669f65ed4c1Saliguori         kvm_uncoalesce_mmio_region(addr, size);
2670f65ed4c1Saliguori }
2671f65ed4c1Saliguori 
267262a2744cSSheng Yang void qemu_flush_coalesced_mmio_buffer(void)
267362a2744cSSheng Yang {
267462a2744cSSheng Yang     if (kvm_enabled())
267562a2744cSSheng Yang         kvm_flush_coalesced_mmio_buffer();
267662a2744cSSheng Yang }
267762a2744cSSheng Yang 
2678c902760fSMarcelo Tosatti #if defined(__linux__) && !defined(TARGET_S390X)
2679c902760fSMarcelo Tosatti 
2680c902760fSMarcelo Tosatti #include <sys/vfs.h>
2681c902760fSMarcelo Tosatti 
2682c902760fSMarcelo Tosatti #define HUGETLBFS_MAGIC       0x958458f6
2683c902760fSMarcelo Tosatti 
                  /* Return the huge page size of the hugetlbfs mount at 'path'
                     (statfs f_bsize), or 0 on failure.  Only warns — does not fail —
                     if the path is not actually on hugetlbfs.  */
2684c902760fSMarcelo Tosatti static long gethugepagesize(const char *path)
2685c902760fSMarcelo Tosatti {
2686c902760fSMarcelo Tosatti     struct statfs fs;
2687c902760fSMarcelo Tosatti     int ret;
2688c902760fSMarcelo Tosatti 
                      /* Retry if interrupted by a signal.  */
2689c902760fSMarcelo Tosatti     do {
2690c902760fSMarcelo Tosatti         ret = statfs(path, &fs);
2691c902760fSMarcelo Tosatti     } while (ret != 0 && errno == EINTR);
2692c902760fSMarcelo Tosatti 
2693c902760fSMarcelo Tosatti     if (ret != 0) {
26946adc0549SMichael Tokarev         perror(path);
2695c902760fSMarcelo Tosatti         return 0;
2696c902760fSMarcelo Tosatti     }
2697c902760fSMarcelo Tosatti 
2698c902760fSMarcelo Tosatti     if (fs.f_type != HUGETLBFS_MAGIC)
2699c902760fSMarcelo Tosatti         fprintf(stderr, "Warning: path not on HugeTLBFS: %s\n", path);
2700c902760fSMarcelo Tosatti 
2701c902760fSMarcelo Tosatti     return fs.f_bsize;
2702c902760fSMarcelo Tosatti }
2703c902760fSMarcelo Tosatti 
                  /* mmap 'memory' bytes of guest RAM from a temporary file on the
                     hugetlbfs mount 'path'.  On success stores the open fd in
                     block->fd and returns the mapped area; returns NULL on any
                     failure so the caller can fall back to anonymous memory.  */
270404b16653SAlex Williamson static void *file_ram_alloc(RAMBlock *block,
270504b16653SAlex Williamson                             ram_addr_t memory,
270604b16653SAlex Williamson                             const char *path)
2707c902760fSMarcelo Tosatti {
2708c902760fSMarcelo Tosatti     char *filename;
2709c902760fSMarcelo Tosatti     void *area;
2710c902760fSMarcelo Tosatti     int fd;
2711c902760fSMarcelo Tosatti #ifdef MAP_POPULATE
2712c902760fSMarcelo Tosatti     int flags;
2713c902760fSMarcelo Tosatti #endif
2714c902760fSMarcelo Tosatti     unsigned long hpagesize;
2715c902760fSMarcelo Tosatti 
2716c902760fSMarcelo Tosatti     hpagesize = gethugepagesize(path);
2717c902760fSMarcelo Tosatti     if (!hpagesize) {
2718c902760fSMarcelo Tosatti         return NULL;
2719c902760fSMarcelo Tosatti     }
2720c902760fSMarcelo Tosatti 
                      /* Requests smaller than one huge page cannot be satisfied here. */
2721c902760fSMarcelo Tosatti     if (memory < hpagesize) {
2722c902760fSMarcelo Tosatti         return NULL;
2723c902760fSMarcelo Tosatti     }
2724c902760fSMarcelo Tosatti 
2725c902760fSMarcelo Tosatti     if (kvm_enabled() && !kvm_has_sync_mmu()) {
2726c902760fSMarcelo Tosatti         fprintf(stderr, "host lacks kvm mmu notifiers, -mem-path unsupported\n");
2727c902760fSMarcelo Tosatti         return NULL;
2728c902760fSMarcelo Tosatti     }
2729c902760fSMarcelo Tosatti 
2730c902760fSMarcelo Tosatti     if (asprintf(&filename, "%s/qemu_back_mem.XXXXXX", path) == -1) {
2731c902760fSMarcelo Tosatti         return NULL;
2732c902760fSMarcelo Tosatti     }
2733c902760fSMarcelo Tosatti 
2734c902760fSMarcelo Tosatti     fd = mkstemp(filename);
2735c902760fSMarcelo Tosatti     if (fd < 0) {
27366adc0549SMichael Tokarev         perror("unable to create backing store for hugepages");
2737c902760fSMarcelo Tosatti         free(filename);
2738c902760fSMarcelo Tosatti         return NULL;
2739c902760fSMarcelo Tosatti     }
                      /* Unlink immediately: the mapping keeps the file alive, and the
                         backing store disappears automatically when fd is closed.  */
2740c902760fSMarcelo Tosatti     unlink(filename);
2741c902760fSMarcelo Tosatti     free(filename);
2742c902760fSMarcelo Tosatti 
                      /* Round the size up to a whole number of huge pages.  */
2743c902760fSMarcelo Tosatti     memory = (memory+hpagesize-1) & ~(hpagesize-1);
2744c902760fSMarcelo Tosatti 
2745c902760fSMarcelo Tosatti     /*
2746c902760fSMarcelo Tosatti      * ftruncate is not supported by hugetlbfs in older
2747c902760fSMarcelo Tosatti      * hosts, so don't bother bailing out on errors.
2748c902760fSMarcelo Tosatti      * If anything goes wrong with it under other filesystems,
2749c902760fSMarcelo Tosatti      * mmap will fail.
2750c902760fSMarcelo Tosatti      */
2751c902760fSMarcelo Tosatti     if (ftruncate(fd, memory))
2752c902760fSMarcelo Tosatti         perror("ftruncate");
2753c902760fSMarcelo Tosatti 
2754c902760fSMarcelo Tosatti #ifdef MAP_POPULATE
2755c902760fSMarcelo Tosatti     /* NB: MAP_POPULATE won't exhaustively alloc all phys pages in the case
2756c902760fSMarcelo Tosatti      * MAP_PRIVATE is requested.  For mem_prealloc we mmap as MAP_SHARED
2757c902760fSMarcelo Tosatti      * to sidestep this quirk.
2758c902760fSMarcelo Tosatti      */
2759c902760fSMarcelo Tosatti     flags = mem_prealloc ? MAP_POPULATE | MAP_SHARED : MAP_PRIVATE;
2760c902760fSMarcelo Tosatti     area = mmap(0, memory, PROT_READ | PROT_WRITE, flags, fd, 0);
2761c902760fSMarcelo Tosatti #else
2762c902760fSMarcelo Tosatti     area = mmap(0, memory, PROT_READ | PROT_WRITE, MAP_PRIVATE, fd, 0);
2763c902760fSMarcelo Tosatti #endif
2764c902760fSMarcelo Tosatti     if (area == MAP_FAILED) {
2765c902760fSMarcelo Tosatti         perror("file_ram_alloc: can't mmap RAM pages");
2766c902760fSMarcelo Tosatti         close(fd);
2767c902760fSMarcelo Tosatti         return (NULL);
2768c902760fSMarcelo Tosatti     }
                      /* qemu_ram_free() uses block->fd to tell file-backed blocks apart. */
276904b16653SAlex Williamson     block->fd = fd;
2770c902760fSMarcelo Tosatti     return area;
2771c902760fSMarcelo Tosatti }
2772c902760fSMarcelo Tosatti #endif
2773c902760fSMarcelo Tosatti 
                  /* Best-fit search for a free gap of at least 'size' bytes in the
                     ram_addr_t offset space occupied by the registered RAM blocks.
                     NOTE(review): gaps are measured against ULONG_MAX — assumes
                     ram_addr_t fits in unsigned long; verify on hosts where
                     ram_addr_t is wider.  */
2774d17b5288SAlex Williamson static ram_addr_t find_ram_offset(ram_addr_t size)
2775d17b5288SAlex Williamson {
277604b16653SAlex Williamson     RAMBlock *block, *next_block;
277709d7ae90SBlue Swirl     ram_addr_t offset = 0, mingap = ULONG_MAX;
277804b16653SAlex Williamson 
277904b16653SAlex Williamson     if (QLIST_EMPTY(&ram_list.blocks))
278004b16653SAlex Williamson         return 0;
278104b16653SAlex Williamson 
                      /* For each block, find the start of the next block above it; the
                         space in between is a candidate gap.  O(n^2) over the (small)
                         block list.  */
278204b16653SAlex Williamson     QLIST_FOREACH(block, &ram_list.blocks, next) {
278304b16653SAlex Williamson         ram_addr_t end, next = ULONG_MAX;
278404b16653SAlex Williamson 
278504b16653SAlex Williamson         end = block->offset + block->length;
278604b16653SAlex Williamson 
278704b16653SAlex Williamson         QLIST_FOREACH(next_block, &ram_list.blocks, next) {
278804b16653SAlex Williamson             if (next_block->offset >= end) {
278904b16653SAlex Williamson                 next = MIN(next, next_block->offset);
279004b16653SAlex Williamson             }
279104b16653SAlex Williamson         }
                          /* Keep the smallest gap that still fits (best fit).  */
279204b16653SAlex Williamson         if (next - end >= size && next - end < mingap) {
279304b16653SAlex Williamson             offset =  end;
279404b16653SAlex Williamson             mingap = next - end;
279504b16653SAlex Williamson         }
279604b16653SAlex Williamson     }
279704b16653SAlex Williamson     return offset;
279804b16653SAlex Williamson }
279904b16653SAlex Williamson 
                  /* Highest end offset of any registered RAM block, i.e. the size the
                     global dirty bitmap must cover.  */
280004b16653SAlex Williamson static ram_addr_t last_ram_offset(void)
280104b16653SAlex Williamson {
2802d17b5288SAlex Williamson     RAMBlock *block;
2803d17b5288SAlex Williamson     ram_addr_t last = 0;
2804d17b5288SAlex Williamson 
2805d17b5288SAlex Williamson     QLIST_FOREACH(block, &ram_list.blocks, next)
2806d17b5288SAlex Williamson         last = MAX(last, block->offset + block->length);
2807d17b5288SAlex Williamson 
2808d17b5288SAlex Williamson     return last;
2809d17b5288SAlex Williamson }
2810d17b5288SAlex Williamson 
                  /* Register a new RAM block of 'size' bytes (page-aligned up) and
                     return its ram_addr_t offset.  If 'host' is non-NULL the caller
                     supplies the backing memory; otherwise it is allocated here
                     (hugetlbfs via -mem-path, an s390x KVM special case, or plain
                     qemu_vmalloc).  The block id is "<dev-path>/<name>" and must be
                     unique; a duplicate aborts.  */
281184b89d78SCam Macdonell ram_addr_t qemu_ram_alloc_from_ptr(DeviceState *dev, const char *name,
281284b89d78SCam Macdonell                                    ram_addr_t size, void *host)
281384b89d78SCam Macdonell {
281484b89d78SCam Macdonell     RAMBlock *new_block, *block;
281584b89d78SCam Macdonell 
281684b89d78SCam Macdonell     size = TARGET_PAGE_ALIGN(size);
281784b89d78SCam Macdonell     new_block = qemu_mallocz(sizeof(*new_block));
281884b89d78SCam Macdonell 
                      /* Prefix the id with the qdev bus path when available so blocks
                         from identical devices stay distinguishable.  */
281984b89d78SCam Macdonell     if (dev && dev->parent_bus && dev->parent_bus->info->get_dev_path) {
282084b89d78SCam Macdonell         char *id = dev->parent_bus->info->get_dev_path(dev);
282184b89d78SCam Macdonell         if (id) {
282284b89d78SCam Macdonell             snprintf(new_block->idstr, sizeof(new_block->idstr), "%s/", id);
282384b89d78SCam Macdonell             qemu_free(id);
282484b89d78SCam Macdonell         }
282584b89d78SCam Macdonell     }
282684b89d78SCam Macdonell     pstrcat(new_block->idstr, sizeof(new_block->idstr), name);
282784b89d78SCam Macdonell 
282884b89d78SCam Macdonell     QLIST_FOREACH(block, &ram_list.blocks, next) {
282984b89d78SCam Macdonell         if (!strcmp(block->idstr, new_block->idstr)) {
283084b89d78SCam Macdonell             fprintf(stderr, "RAMBlock \"%s\" already registered, abort!\n",
283184b89d78SCam Macdonell                     new_block->idstr);
283284b89d78SCam Macdonell             abort();
283384b89d78SCam Macdonell         }
283484b89d78SCam Macdonell     }
283584b89d78SCam Macdonell 
28366977dfe6SYoshiaki Tamura     if (host) {
283784b89d78SCam Macdonell         new_block->host = host;
28386977dfe6SYoshiaki Tamura     } else {
2839c902760fSMarcelo Tosatti         if (mem_path) {
2840c902760fSMarcelo Tosatti #if defined (__linux__) && !defined(TARGET_S390X)
284104b16653SAlex Williamson             new_block->host = file_ram_alloc(new_block, size, mem_path);
                                  /* hugetlbfs failed: fall back to anonymous memory.  */
2842618a568dSMarcelo Tosatti             if (!new_block->host) {
2843618a568dSMarcelo Tosatti                 new_block->host = qemu_vmalloc(size);
2844618a568dSMarcelo Tosatti #ifdef MADV_MERGEABLE
                                      /* Allow KSM to merge identical pages.  */
2845618a568dSMarcelo Tosatti                 madvise(new_block->host, size, MADV_MERGEABLE);
2846618a568dSMarcelo Tosatti #endif
2847618a568dSMarcelo Tosatti             }
2848c902760fSMarcelo Tosatti #else
2849c902760fSMarcelo Tosatti             fprintf(stderr, "-mem-path option unsupported\n");
2850c902760fSMarcelo Tosatti             exit(1);
2851c902760fSMarcelo Tosatti #endif
2852c902760fSMarcelo Tosatti         } else {
28536b02494dSAlexander Graf #if defined(TARGET_S390X) && defined(CONFIG_KVM)
28546b02494dSAlexander Graf             /* XXX S390 KVM requires the topmost vma of the RAM to be < 256GB */
2855c902760fSMarcelo Tosatti             new_block->host = mmap((void*)0x1000000, size,
2856c902760fSMarcelo Tosatti                                    PROT_EXEC|PROT_READ|PROT_WRITE,
28576b02494dSAlexander Graf                                    MAP_SHARED | MAP_ANONYMOUS, -1, 0);
28586b02494dSAlexander Graf #else
285994a6b54fSpbrook             new_block->host = qemu_vmalloc(size);
28606b02494dSAlexander Graf #endif
2861ccb167e9SIzik Eidus #ifdef MADV_MERGEABLE
2862ccb167e9SIzik Eidus             madvise(new_block->host, size, MADV_MERGEABLE);
2863ccb167e9SIzik Eidus #endif
2864c902760fSMarcelo Tosatti         }
28656977dfe6SYoshiaki Tamura     }
28666977dfe6SYoshiaki Tamura 
2867d17b5288SAlex Williamson     new_block->offset = find_ram_offset(size);
286894a6b54fSpbrook     new_block->length = size;
286994a6b54fSpbrook 
2870f471a17eSAlex Williamson     QLIST_INSERT_HEAD(&ram_list.blocks, new_block, next);
287194a6b54fSpbrook 
                      /* Grow the dirty bitmap and mark the new range fully dirty
                         (all dirty flags set = 0xff per page).  */
2872f471a17eSAlex Williamson     ram_list.phys_dirty = qemu_realloc(ram_list.phys_dirty,
287304b16653SAlex Williamson                                        last_ram_offset() >> TARGET_PAGE_BITS);
2874d17b5288SAlex Williamson     memset(ram_list.phys_dirty + (new_block->offset >> TARGET_PAGE_BITS),
287594a6b54fSpbrook            0xff, size >> TARGET_PAGE_BITS);
287694a6b54fSpbrook 
28776f0437e8SJan Kiszka     if (kvm_enabled())
28786f0437e8SJan Kiszka         kvm_setup_guest_memory(new_block->host, size);
28796f0437e8SJan Kiszka 
288094a6b54fSpbrook     return new_block->offset;
288194a6b54fSpbrook }
2882e9a1ab19Sbellard 
                  /* Convenience wrapper: allocate a RAM block whose backing memory is
                     allocated internally (host == NULL).  */
28836977dfe6SYoshiaki Tamura ram_addr_t qemu_ram_alloc(DeviceState *dev, const char *name, ram_addr_t size)
28846977dfe6SYoshiaki Tamura {
28856977dfe6SYoshiaki Tamura     return qemu_ram_alloc_from_ptr(dev, name, size, NULL);
28866977dfe6SYoshiaki Tamura }
28876977dfe6SYoshiaki Tamura 
                  /* Unregister and release the RAM block starting at 'addr'.  Silently
                     does nothing if no block matches.  */
2888c227f099SAnthony Liguori void qemu_ram_free(ram_addr_t addr)
2889e9a1ab19Sbellard {
289004b16653SAlex Williamson     RAMBlock *block;
289104b16653SAlex Williamson 
289204b16653SAlex Williamson     QLIST_FOREACH(block, &ram_list.blocks, next) {
289304b16653SAlex Williamson         if (addr == block->offset) {
289404b16653SAlex Williamson             QLIST_REMOVE(block, next);
289504b16653SAlex Williamson             if (mem_path) {
289604b16653SAlex Williamson #if defined (__linux__) && !defined(TARGET_S390X)
                                  /* NOTE(review): a file-backed block is detected by
                                     block->fd being non-zero; fd 0 is a valid descriptor,
                                     and on non-Linux the host memory is never freed here
                                     — confirm intended.  */
289704b16653SAlex Williamson                 if (block->fd) {
289804b16653SAlex Williamson                     munmap(block->host, block->length);
289904b16653SAlex Williamson                     close(block->fd);
290004b16653SAlex Williamson                 } else {
290104b16653SAlex Williamson                     qemu_vfree(block->host);
290204b16653SAlex Williamson                 }
290304b16653SAlex Williamson #endif
290404b16653SAlex Williamson             } else {
290504b16653SAlex Williamson #if defined(TARGET_S390X) && defined(CONFIG_KVM)
                                  /* Matches the raw mmap() in qemu_ram_alloc_from_ptr. */
290604b16653SAlex Williamson                 munmap(block->host, block->length);
290704b16653SAlex Williamson #else
290804b16653SAlex Williamson                 qemu_vfree(block->host);
290904b16653SAlex Williamson #endif
291004b16653SAlex Williamson             }
291104b16653SAlex Williamson             qemu_free(block);
291204b16653SAlex Williamson             return;
291304b16653SAlex Williamson         }
291404b16653SAlex Williamson     }
291504b16653SAlex Williamson 
2916e9a1ab19Sbellard }
2917e9a1ab19Sbellard 
2918dc828ca1Spbrook /* Return a host pointer to ram allocated with qemu_ram_alloc.
29195579c7f3Spbrook    With the exception of the softmmu code in this file, this should
29205579c7f3Spbrook    only be used for local memory (e.g. video ram) that the device owns,
29215579c7f3Spbrook    and knows it isn't going to access beyond the end of the block.
29225579c7f3Spbrook 
29235579c7f3Spbrook    It should not be used for general purpose DMA.
29245579c7f3Spbrook    Use cpu_physical_memory_map/cpu_physical_memory_rw instead.
29255579c7f3Spbrook  */
2926c227f099SAnthony Liguori void *qemu_get_ram_ptr(ram_addr_t addr)
2927dc828ca1Spbrook {
292894a6b54fSpbrook     RAMBlock *block;
292994a6b54fSpbrook 
2930f471a17eSAlex Williamson     QLIST_FOREACH(block, &ram_list.blocks, next) {
                          /* Unsigned subtraction doubles as a range check: addresses
                             below block->offset wrap to a huge value and fail.  */
2931f471a17eSAlex Williamson         if (addr - block->offset < block->length) {
                              /* Move the hit to the list head (MRU ordering) so repeated
                                 lookups into the same block stay fast.  */
2932f471a17eSAlex Williamson             QLIST_REMOVE(block, next);
2933f471a17eSAlex Williamson             QLIST_INSERT_HEAD(&ram_list.blocks, block, next);
2934f471a17eSAlex Williamson             return block->host + (addr - block->offset);
293594a6b54fSpbrook         }
2936f471a17eSAlex Williamson     }
2937f471a17eSAlex Williamson 
                      /* An unknown ram offset is a fatal internal error.  */
293894a6b54fSpbrook     fprintf(stderr, "Bad ram offset %" PRIx64 "\n", (uint64_t)addr);
293994a6b54fSpbrook     abort();
2940f471a17eSAlex Williamson 
2941f471a17eSAlex Williamson     return NULL;
2942dc828ca1Spbrook }
2943dc828ca1Spbrook 
29445579c7f3Spbrook /* Some of the softmmu routines need to translate from a host pointer
29455579c7f3Spbrook    (typically a TLB entry) back to a ram offset.  */
2946c227f099SAnthony Liguori ram_addr_t qemu_ram_addr_from_host(void *ptr)
29475579c7f3Spbrook {
294894a6b54fSpbrook     RAMBlock *block;
294994a6b54fSpbrook     uint8_t *host = ptr;
295094a6b54fSpbrook 
2951f471a17eSAlex Williamson     QLIST_FOREACH(block, &ram_list.blocks, next) {
                          /* NOTE(review): host - block->host is a signed ptrdiff_t; a
                             pointer below block->host yields a negative value whose
                             comparison against block->length depends on the implicit
                             conversion — confirm ram_addr_t width makes this safe.  */
2952f471a17eSAlex Williamson         if (host - block->host < block->length) {
2953f471a17eSAlex Williamson             return block->offset + (host - block->host);
295494a6b54fSpbrook         }
2955f471a17eSAlex Williamson     }
2956f471a17eSAlex Williamson 
                      /* A pointer outside every RAM block is a fatal internal error.  */
295794a6b54fSpbrook     fprintf(stderr, "Bad ram pointer %p\n", ptr);
295894a6b54fSpbrook     abort();
2959f471a17eSAlex Williamson 
2960f471a17eSAlex Williamson     return 0;
29615579c7f3Spbrook }
29625579c7f3Spbrook 
                  /* Read handlers for unassigned physical memory (byte/word/long).
                     They return 0; on targets that model unassigned-access faults
                     (SPARC, MicroBlaze) they first raise the access exception.  */
2963c227f099SAnthony Liguori static uint32_t unassigned_mem_readb(void *opaque, target_phys_addr_t addr)
296433417e70Sbellard {
296567d3b957Spbrook #ifdef DEBUG_UNASSIGNED
2966ab3d1727Sblueswir1     printf("Unassigned mem read " TARGET_FMT_plx "\n", addr);
296767d3b957Spbrook #endif
2968faed1c2aSEdgar E. Iglesias #if defined(TARGET_SPARC) || defined(TARGET_MICROBLAZE)
2969e18231a3Sblueswir1     do_unassigned_access(addr, 0, 0, 0, 1);
2970e18231a3Sblueswir1 #endif
2971e18231a3Sblueswir1     return 0;
2972e18231a3Sblueswir1 }
2973e18231a3Sblueswir1 
2974c227f099SAnthony Liguori static uint32_t unassigned_mem_readw(void *opaque, target_phys_addr_t addr)
2975e18231a3Sblueswir1 {
2976e18231a3Sblueswir1 #ifdef DEBUG_UNASSIGNED
2977e18231a3Sblueswir1     printf("Unassigned mem read " TARGET_FMT_plx "\n", addr);
2978e18231a3Sblueswir1 #endif
2979faed1c2aSEdgar E. Iglesias #if defined(TARGET_SPARC) || defined(TARGET_MICROBLAZE)
2980e18231a3Sblueswir1     do_unassigned_access(addr, 0, 0, 0, 2);
2981e18231a3Sblueswir1 #endif
2982e18231a3Sblueswir1     return 0;
2983e18231a3Sblueswir1 }
2984e18231a3Sblueswir1 
2985c227f099SAnthony Liguori static uint32_t unassigned_mem_readl(void *opaque, target_phys_addr_t addr)
2986e18231a3Sblueswir1 {
2987e18231a3Sblueswir1 #ifdef DEBUG_UNASSIGNED
2988e18231a3Sblueswir1     printf("Unassigned mem read " TARGET_FMT_plx "\n", addr);
2989e18231a3Sblueswir1 #endif
2990faed1c2aSEdgar E. Iglesias #if defined(TARGET_SPARC) || defined(TARGET_MICROBLAZE)
2991e18231a3Sblueswir1     do_unassigned_access(addr, 0, 0, 0, 4);
2992b4f0a316Sblueswir1 #endif
299333417e70Sbellard     return 0;
299433417e70Sbellard }
299533417e70Sbellard 
                  /* Write handlers for unassigned physical memory (byte/word/long).
                     Writes are discarded; SPARC/MicroBlaze additionally raise the
                     unassigned-access exception.  */
2996c227f099SAnthony Liguori static void unassigned_mem_writeb(void *opaque, target_phys_addr_t addr, uint32_t val)
299733417e70Sbellard {
299867d3b957Spbrook #ifdef DEBUG_UNASSIGNED
2999ab3d1727Sblueswir1     printf("Unassigned mem write " TARGET_FMT_plx " = 0x%x\n", addr, val);
300067d3b957Spbrook #endif
3001faed1c2aSEdgar E. Iglesias #if defined(TARGET_SPARC) || defined(TARGET_MICROBLAZE)
3002e18231a3Sblueswir1     do_unassigned_access(addr, 1, 0, 0, 1);
3003e18231a3Sblueswir1 #endif
3004e18231a3Sblueswir1 }
3005e18231a3Sblueswir1 
3006c227f099SAnthony Liguori static void unassigned_mem_writew(void *opaque, target_phys_addr_t addr, uint32_t val)
3007e18231a3Sblueswir1 {
3008e18231a3Sblueswir1 #ifdef DEBUG_UNASSIGNED
3009e18231a3Sblueswir1     printf("Unassigned mem write " TARGET_FMT_plx " = 0x%x\n", addr, val);
3010e18231a3Sblueswir1 #endif
3011faed1c2aSEdgar E. Iglesias #if defined(TARGET_SPARC) || defined(TARGET_MICROBLAZE)
3012e18231a3Sblueswir1     do_unassigned_access(addr, 1, 0, 0, 2);
3013e18231a3Sblueswir1 #endif
3014e18231a3Sblueswir1 }
3015e18231a3Sblueswir1 
3016c227f099SAnthony Liguori static void unassigned_mem_writel(void *opaque, target_phys_addr_t addr, uint32_t val)
3017e18231a3Sblueswir1 {
3018e18231a3Sblueswir1 #ifdef DEBUG_UNASSIGNED
3019e18231a3Sblueswir1     printf("Unassigned mem write " TARGET_FMT_plx " = 0x%x\n", addr, val);
3020e18231a3Sblueswir1 #endif
3021faed1c2aSEdgar E. Iglesias #if defined(TARGET_SPARC) || defined(TARGET_MICROBLAZE)
3022e18231a3Sblueswir1     do_unassigned_access(addr, 1, 0, 0, 4);
3023b4f0a316Sblueswir1 #endif
302433417e70Sbellard }
302533417e70Sbellard 
                  /* Dispatch tables for unassigned memory, indexed by access size
                     shift (0 = byte, 1 = word, 2 = long).  */
3026d60efc6bSBlue Swirl static CPUReadMemoryFunc * const unassigned_mem_read[3] = {
302733417e70Sbellard     unassigned_mem_readb,
3028e18231a3Sblueswir1     unassigned_mem_readw,
3029e18231a3Sblueswir1     unassigned_mem_readl,
303033417e70Sbellard };
303133417e70Sbellard 
3032d60efc6bSBlue Swirl static CPUWriteMemoryFunc * const unassigned_mem_write[3] = {
303333417e70Sbellard     unassigned_mem_writeb,
3034e18231a3Sblueswir1     unassigned_mem_writew,
3035e18231a3Sblueswir1     unassigned_mem_writel,
303633417e70Sbellard };
303733417e70Sbellard 
                  /* Byte store into a RAM page that is still marked clean: invalidate
                     any translated code on the page, perform the store, then set the
                     page's dirty flags (all except CODE_DIRTY_FLAG).  */
3038c227f099SAnthony Liguori static void notdirty_mem_writeb(void *opaque, target_phys_addr_t ram_addr,
30390f459d16Spbrook                                 uint32_t val)
30401ccde1cbSbellard {
30413a7d929eSbellard     int dirty_flags;
3042f7c11b53SYoshiaki Tamura     dirty_flags = cpu_physical_memory_get_dirty_flags(ram_addr);
30433a7d929eSbellard     if (!(dirty_flags & CODE_DIRTY_FLAG)) {
                          /* Page may hold translated code: drop the TBs first, then
                             re-read the flags which the invalidation may have changed. */
30443a7d929eSbellard #if !defined(CONFIG_USER_ONLY)
30453a7d929eSbellard         tb_invalidate_phys_page_fast(ram_addr, 1);
3046f7c11b53SYoshiaki Tamura         dirty_flags = cpu_physical_memory_get_dirty_flags(ram_addr);
30473a7d929eSbellard #endif
30483a7d929eSbellard     }
30495579c7f3Spbrook     stb_p(qemu_get_ram_ptr(ram_addr), val);
3050f23db169Sbellard     dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
3051f7c11b53SYoshiaki Tamura     cpu_physical_memory_set_dirty_flags(ram_addr, dirty_flags);
3052f23db169Sbellard     /* we remove the notdirty callback only if the code has been
3053f23db169Sbellard        flushed */
3054f23db169Sbellard     if (dirty_flags == 0xff)
30552e70f6efSpbrook         tlb_set_dirty(cpu_single_env, cpu_single_env->mem_io_vaddr);
30561ccde1cbSbellard }
30571ccde1cbSbellard 
3058c227f099SAnthony Liguori static void notdirty_mem_writew(void *opaque, target_phys_addr_t ram_addr,
30590f459d16Spbrook                                 uint32_t val)
30601ccde1cbSbellard {
30613a7d929eSbellard     int dirty_flags;
3062f7c11b53SYoshiaki Tamura     dirty_flags = cpu_physical_memory_get_dirty_flags(ram_addr);
30633a7d929eSbellard     if (!(dirty_flags & CODE_DIRTY_FLAG)) {
30643a7d929eSbellard #if !defined(CONFIG_USER_ONLY)
30653a7d929eSbellard         tb_invalidate_phys_page_fast(ram_addr, 2);
3066f7c11b53SYoshiaki Tamura         dirty_flags = cpu_physical_memory_get_dirty_flags(ram_addr);
30673a7d929eSbellard #endif
30683a7d929eSbellard     }
30695579c7f3Spbrook     stw_p(qemu_get_ram_ptr(ram_addr), val);
3070f23db169Sbellard     dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
3071f7c11b53SYoshiaki Tamura     cpu_physical_memory_set_dirty_flags(ram_addr, dirty_flags);
3072f23db169Sbellard     /* we remove the notdirty callback only if the code has been
3073f23db169Sbellard        flushed */
3074f23db169Sbellard     if (dirty_flags == 0xff)
30752e70f6efSpbrook         tlb_set_dirty(cpu_single_env, cpu_single_env->mem_io_vaddr);
30761ccde1cbSbellard }
30771ccde1cbSbellard 
3078c227f099SAnthony Liguori static void notdirty_mem_writel(void *opaque, target_phys_addr_t ram_addr,
30790f459d16Spbrook                                 uint32_t val)
30801ccde1cbSbellard {
30813a7d929eSbellard     int dirty_flags;
3082f7c11b53SYoshiaki Tamura     dirty_flags = cpu_physical_memory_get_dirty_flags(ram_addr);
30833a7d929eSbellard     if (!(dirty_flags & CODE_DIRTY_FLAG)) {
30843a7d929eSbellard #if !defined(CONFIG_USER_ONLY)
30853a7d929eSbellard         tb_invalidate_phys_page_fast(ram_addr, 4);
3086f7c11b53SYoshiaki Tamura         dirty_flags = cpu_physical_memory_get_dirty_flags(ram_addr);
30873a7d929eSbellard #endif
30883a7d929eSbellard     }
30895579c7f3Spbrook     stl_p(qemu_get_ram_ptr(ram_addr), val);
3090f23db169Sbellard     dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
3091f7c11b53SYoshiaki Tamura     cpu_physical_memory_set_dirty_flags(ram_addr, dirty_flags);
3092f23db169Sbellard     /* we remove the notdirty callback only if the code has been
3093f23db169Sbellard        flushed */
3094f23db169Sbellard     if (dirty_flags == 0xff)
30952e70f6efSpbrook         tlb_set_dirty(cpu_single_env, cpu_single_env->mem_io_vaddr);
30961ccde1cbSbellard }
30971ccde1cbSbellard 
/* Placeholder read table for slots whose reads are never dispatched
   through the handler tables (e.g. ROM/notdirty, see io_mem_init()). */
static CPUReadMemoryFunc * const error_mem_read[3] = {
    NULL, /* never used */
    NULL, /* never used */
    NULL, /* never used */
};
31033a7d929eSbellard 
/* Write handlers for the "not dirty" RAM slot, indexed by log2(size). */
static CPUWriteMemoryFunc * const notdirty_mem_write[3] = {
    notdirty_mem_writeb,
    notdirty_mem_writew,
    notdirty_mem_writel,
};
31091ccde1cbSbellard 
/* Generate a debug exception if a watchpoint has been hit.
 * 'offset' is the offset of the access within the current page,
 * 'len_mask' masks off the low address bits for the access size, and
 * 'flags' is BP_MEM_READ or BP_MEM_WRITE.  On a hit, the current TB is
 * invalidated and execution restarts so the exception is precise. */
static void check_watchpoint(int offset, int len_mask, int flags)
{
    CPUState *env = cpu_single_env;
    target_ulong pc, cs_base;
    TranslationBlock *tb;
    target_ulong vaddr;
    CPUWatchpoint *wp;
    int cpu_flags;

    if (env->watchpoint_hit) {
        /* We re-entered the check after replacing the TB. Now raise
         * the debug interrupt so that it will trigger after the
         * current instruction. */
        cpu_interrupt(env, CPU_INTERRUPT_DEBUG);
        return;
    }
    /* Reconstruct the guest virtual address of the access. */
    vaddr = (env->mem_io_vaddr & TARGET_PAGE_MASK) + offset;
    QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
        /* Hit when the addresses coincide under either size mask and
           the access direction matches the watchpoint's flags. */
        if ((vaddr == (wp->vaddr & len_mask) ||
             (vaddr & wp->len_mask) == wp->vaddr) && (wp->flags & flags)) {
            wp->flags |= BP_WATCHPOINT_HIT;
            if (!env->watchpoint_hit) {
                env->watchpoint_hit = wp;
                tb = tb_find_pc(env->mem_io_pc);
                if (!tb) {
                    cpu_abort(env, "check_watchpoint: could not find TB for "
                              "pc=%p", (void *)env->mem_io_pc);
                }
                /* Roll the CPU state back to the faulting instruction
                   before invalidating the TB we were executing. */
                cpu_restore_state(tb, env, env->mem_io_pc, NULL);
                tb_phys_invalidate(tb, -1);
                if (wp->flags & BP_STOP_BEFORE_ACCESS) {
                    env->exception_index = EXCP_DEBUG;
                } else {
                    /* Regenerate a one-instruction TB; when it traps back
                       in here, the watchpoint_hit path above raises the
                       debug interrupt after the access. */
                    cpu_get_tb_cpu_state(env, &pc, &cs_base, &cpu_flags);
                    tb_gen_code(env, pc, cs_base, cpu_flags, 1);
                }
                cpu_resume_from_signal(env, NULL);
            }
        } else {
            wp->flags &= ~BP_WATCHPOINT_HIT;
        }
    }
}
31540f459d16Spbrook 
/* Watchpoint access routines.  Watchpoints are inserted using TLB tricks,
   so these check for a hit then pass through to the normal out-of-line
   phys routines.  */
static uint32_t watch_mem_readb(void *opaque, target_phys_addr_t addr)
{
    /* len_mask ~0x0: all address bits significant for a 1-byte access */
    check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x0, BP_MEM_READ);
    return ldub_phys(addr);
}
31636658ffb8Spbrook 
/* 16-bit watched read: check for a read watchpoint hit, then do the
   real physical read. */
static uint32_t watch_mem_readw(void *opaque, target_phys_addr_t addr)
{
    /* len_mask ~0x1: ignore the low bit of a 2-byte-aligned access */
    check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x1, BP_MEM_READ);
    return lduw_phys(addr);
}
31696658ffb8Spbrook 
/* 32-bit watched read: check for a read watchpoint hit, then do the
   real physical read. */
static uint32_t watch_mem_readl(void *opaque, target_phys_addr_t addr)
{
    /* len_mask ~0x3: ignore the low two bits of a 4-byte-aligned access */
    check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x3, BP_MEM_READ);
    return ldl_phys(addr);
}
31756658ffb8Spbrook 
/* 8-bit watched write: check for a write watchpoint hit, then do the
   real physical store. */
static void watch_mem_writeb(void *opaque, target_phys_addr_t addr,
                             uint32_t val)
{
    check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x0, BP_MEM_WRITE);
    stb_phys(addr, val);
}
31826658ffb8Spbrook 
/* 16-bit watched write: check for a write watchpoint hit, then do the
   real physical store. */
static void watch_mem_writew(void *opaque, target_phys_addr_t addr,
                             uint32_t val)
{
    check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x1, BP_MEM_WRITE);
    stw_phys(addr, val);
}
31896658ffb8Spbrook 
/* 32-bit watched write: check for a write watchpoint hit, then do the
   real physical store. */
static void watch_mem_writel(void *opaque, target_phys_addr_t addr,
                             uint32_t val)
{
    check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x3, BP_MEM_WRITE);
    stl_phys(addr, val);
}
31966658ffb8Spbrook 
/* Read handlers for the watchpoint slot, indexed by log2(access size). */
static CPUReadMemoryFunc * const watch_mem_read[3] = {
    watch_mem_readb,
    watch_mem_readw,
    watch_mem_readl,
};
32026658ffb8Spbrook 
/* Write handlers for the watchpoint slot, indexed by log2(access size). */
static CPUWriteMemoryFunc * const watch_mem_write[3] = {
    watch_mem_writeb,
    watch_mem_writew,
    watch_mem_writel,
};
32086658ffb8Spbrook 
/* Dispatch a read of size 2^len bytes landing inside a subpage: look up
   the I/O slot registered for this offset, add its region offset to the
   address, and forward to that slot's read handler. */
static inline uint32_t subpage_readlen (subpage_t *mmio,
                                        target_phys_addr_t addr,
                                        unsigned int len)
{
    unsigned int idx = SUBPAGE_IDX(addr);
#if defined(DEBUG_SUBPAGE)
    printf("%s: subpage %p len %d addr " TARGET_FMT_plx " idx %d\n", __func__,
           mmio, len, addr, idx);
#endif

    addr += mmio->region_offset[idx];
    idx = mmio->sub_io_index[idx];
    return io_mem_read[idx][len](io_mem_opaque[idx], addr);
}
3223db7b5426Sblueswir1 
/* Dispatch a write of size 2^len bytes landing inside a subpage: look up
   the I/O slot registered for this offset, add its region offset to the
   address, and forward to that slot's write handler. */
static inline void subpage_writelen (subpage_t *mmio, target_phys_addr_t addr,
                                     uint32_t value, unsigned int len)
{
    unsigned int idx = SUBPAGE_IDX(addr);
#if defined(DEBUG_SUBPAGE)
    printf("%s: subpage %p len %d addr " TARGET_FMT_plx " idx %d value %08x\n",
           __func__, mmio, len, addr, idx, value);
#endif

    addr += mmio->region_offset[idx];
    idx = mmio->sub_io_index[idx];
    io_mem_write[idx][len](io_mem_opaque[idx], addr, value);
}
3237db7b5426Sblueswir1 
/* Byte read entry point for a subpage slot (len 0 = 1 byte). */
static uint32_t subpage_readb (void *opaque, target_phys_addr_t addr)
{
    return subpage_readlen(opaque, addr, 0);
}
3242db7b5426Sblueswir1 
/* Byte write entry point for a subpage slot (len 0 = 1 byte). */
static void subpage_writeb (void *opaque, target_phys_addr_t addr,
                            uint32_t value)
{
    subpage_writelen(opaque, addr, value, 0);
}
3248db7b5426Sblueswir1 
/* 16-bit read entry point for a subpage slot (len 1 = 2 bytes). */
static uint32_t subpage_readw (void *opaque, target_phys_addr_t addr)
{
    return subpage_readlen(opaque, addr, 1);
}
3253db7b5426Sblueswir1 
/* 16-bit write entry point for a subpage slot (len 1 = 2 bytes). */
static void subpage_writew (void *opaque, target_phys_addr_t addr,
                            uint32_t value)
{
    subpage_writelen(opaque, addr, value, 1);
}
3259db7b5426Sblueswir1 
/* 32-bit read entry point for a subpage slot (len 2 = 4 bytes). */
static uint32_t subpage_readl (void *opaque, target_phys_addr_t addr)
{
    return subpage_readlen(opaque, addr, 2);
}
3264db7b5426Sblueswir1 
/* 32-bit write entry point for a subpage slot (len 2 = 4 bytes). */
static void subpage_writel (void *opaque, target_phys_addr_t addr,
                            uint32_t value)
{
    subpage_writelen(opaque, addr, value, 2);
}
3270db7b5426Sblueswir1 
/* Subpage read handlers, indexed by log2(access size). */
static CPUReadMemoryFunc * const subpage_read[] = {
    &subpage_readb,
    &subpage_readw,
    &subpage_readl,
};
3276db7b5426Sblueswir1 
/* Subpage write handlers, indexed by log2(access size). */
static CPUWriteMemoryFunc * const subpage_write[] = {
    &subpage_writeb,
    &subpage_writew,
    &subpage_writel,
};
3282db7b5426Sblueswir1 
/* Route the byte range [start, end] of a subpage (both offsets within
   the page, < TARGET_PAGE_SIZE) to the I/O slot encoded in 'memory',
   adding 'region_offset' to addresses forwarded there.
   Returns 0 on success, -1 if the range is out of bounds. */
static int subpage_register (subpage_t *mmio, uint32_t start, uint32_t end,
                             ram_addr_t memory, ram_addr_t region_offset)
{
    int idx, eidx;

    if (start >= TARGET_PAGE_SIZE || end >= TARGET_PAGE_SIZE)
        return -1;
    idx = SUBPAGE_IDX(start);
    eidx = SUBPAGE_IDX(end);
#if defined(DEBUG_SUBPAGE)
    printf("%s: %p start %08x end %08x idx %08x eidx %08x mem %ld\n", __func__,
           mmio, start, end, idx, eidx, memory);
#endif
    /* Plain RAM is not dispatched through the subpage handler tables;
       register it as unassigned instead.  NOTE(review): presumably RAM
       inside a subpage is accessed by another path -- confirm with the
       callers of subpage_register(). */
    if ((memory & ~TARGET_PAGE_MASK) == IO_MEM_RAM)
        memory = IO_MEM_UNASSIGNED;
    /* Reduce the phys_offset-style value to a bare io_mem table index. */
    memory = (memory >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
    for (; idx <= eidx; idx++) {
        mmio->sub_io_index[idx] = memory;
        mmio->region_offset[idx] = region_offset;
    }

    return 0;
}
3306db7b5426Sblueswir1 
/* Allocate and register a subpage covering the page at 'base'.  The
   whole page is initially routed to 'orig_memory'/'region_offset';
   *phys receives the new slot value (with IO_MEM_SUBPAGE set) for use
   in the physical page tables.  Returns the new subpage descriptor. */
static subpage_t *subpage_init (target_phys_addr_t base, ram_addr_t *phys,
                                ram_addr_t orig_memory,
                                ram_addr_t region_offset)
{
    subpage_t *mmio;
    int subpage_memory;

    mmio = qemu_mallocz(sizeof(subpage_t));

    mmio->base = base;
    subpage_memory = cpu_register_io_memory(subpage_read, subpage_write, mmio);
#if defined(DEBUG_SUBPAGE)
    printf("%s: %p base " TARGET_FMT_plx " len %08x %d\n", __func__,
           mmio, base, TARGET_PAGE_SIZE, subpage_memory);
#endif
    *phys = subpage_memory | IO_MEM_SUBPAGE;
    subpage_register(mmio, 0, TARGET_PAGE_SIZE-1, orig_memory, region_offset);

    return mmio;
}
3327db7b5426Sblueswir1 
332888715657Saliguori static int get_free_io_mem_idx(void)
332988715657Saliguori {
333088715657Saliguori     int i;
333188715657Saliguori 
333288715657Saliguori     for (i = 0; i<IO_MEM_NB_ENTRIES; i++)
333388715657Saliguori         if (!io_mem_used[i]) {
333488715657Saliguori             io_mem_used[i] = 1;
333588715657Saliguori             return i;
333688715657Saliguori         }
3337c6703b47SRiku Voipio     fprintf(stderr, "RAN out out io_mem_idx, max %d !\n", IO_MEM_NB_ENTRIES);
333888715657Saliguori     return -1;
333988715657Saliguori }
334088715657Saliguori 
/* mem_read and mem_write are arrays of functions containing the
   function to access byte (index 0), word (index 1) and dword (index
   2). Functions can be omitted with a NULL function pointer, in which
   case the corresponding unassigned-memory handler is installed.
   If io_index is positive, the corresponding io zone is
   modified. If it is zero (or negative), a new io zone is allocated.
   The return value can be used with cpu_register_physical_memory().
   (-1) is returned if error. */
static int cpu_register_io_memory_fixed(int io_index,
                                        CPUReadMemoryFunc * const *mem_read,
                                        CPUWriteMemoryFunc * const *mem_write,
                                        void *opaque)
{
    int i;

    if (io_index <= 0) {
        io_index = get_free_io_mem_idx();
        if (io_index == -1)
            return io_index;
    } else {
        /* Caller passed an encoded slot value; recover the table index
           and bounds-check it. */
        io_index >>= IO_MEM_SHIFT;
        if (io_index >= IO_MEM_NB_ENTRIES)
            return -1;
    }

    /* Install handlers for each access size, defaulting missing ones
       to the unassigned-memory handlers. */
    for (i = 0; i < 3; ++i) {
        io_mem_read[io_index][i]
            = (mem_read[i] ? mem_read[i] : unassigned_mem_read[i]);
    }
    for (i = 0; i < 3; ++i) {
        io_mem_write[io_index][i]
            = (mem_write[i] ? mem_write[i] : unassigned_mem_write[i]);
    }
    io_mem_opaque[io_index] = opaque;

    return (io_index << IO_MEM_SHIFT);
}
337761382a50Sbellard 
/* Allocate a fresh I/O slot and install the given handlers; thin
   wrapper around cpu_register_io_memory_fixed() with io_index == 0. */
int cpu_register_io_memory(CPUReadMemoryFunc * const *mem_read,
                           CPUWriteMemoryFunc * const *mem_write,
                           void *opaque)
{
    return cpu_register_io_memory_fixed(0, mem_read, mem_write, opaque);
}
33841eed09cbSAvi Kivity 
338588715657Saliguori void cpu_unregister_io_memory(int io_table_address)
338688715657Saliguori {
338788715657Saliguori     int i;
338888715657Saliguori     int io_index = io_table_address >> IO_MEM_SHIFT;
338988715657Saliguori 
339088715657Saliguori     for (i=0;i < 3; i++) {
339188715657Saliguori         io_mem_read[io_index][i] = unassigned_mem_read[i];
339288715657Saliguori         io_mem_write[io_index][i] = unassigned_mem_write[i];
339388715657Saliguori     }
339488715657Saliguori     io_mem_opaque[io_index] = NULL;
339588715657Saliguori     io_mem_used[io_index] = 0;
339688715657Saliguori }
339788715657Saliguori 
/* Install the handlers for the fixed, pre-assigned I/O slots and
   register the watchpoint handlers in a dynamically allocated slot. */
static void io_mem_init(void)
{
    int i;

    /* ROM and notdirty pass error_mem_read (all NULL): reads are never
       dispatched through these slots' handler tables. */
    cpu_register_io_memory_fixed(IO_MEM_ROM, error_mem_read, unassigned_mem_write, NULL);
    cpu_register_io_memory_fixed(IO_MEM_UNASSIGNED, unassigned_mem_read, unassigned_mem_write, NULL);
    cpu_register_io_memory_fixed(IO_MEM_NOTDIRTY, error_mem_read, notdirty_mem_write, NULL);
    /* Reserve the first five slots so get_free_io_mem_idx() never hands
       them out. */
    for (i=0; i<5; i++)
        io_mem_used[i] = 1;

    io_mem_watch = cpu_register_io_memory(watch_mem_read,
                                          watch_mem_write, NULL);
}
3411e9179ce1SAvi Kivity 
3412e2eef170Spbrook #endif /* !defined(CONFIG_USER_ONLY) */
3413e2eef170Spbrook 
341413eb76e0Sbellard /* physical memory access (slow version, mainly for debug) */
341513eb76e0Sbellard #if defined(CONFIG_USER_ONLY)
/* Read from or write to guest memory (user-mode-only build).
 * Walks the range page by page, honouring the PAGE_VALID / PAGE_READ /
 * PAGE_WRITE protection flags via page_get_flags().
 * Returns 0 on success, -1 if any page is invalid or lacks the required
 * permission (a partial transfer may already have taken place). */
int cpu_memory_rw_debug(CPUState *env, target_ulong addr,
                        uint8_t *buf, int len, int is_write)
{
    int l, flags;
    target_ulong page;
    void * p;

    while (len > 0) {
        page = addr & TARGET_PAGE_MASK;
        /* Clamp this iteration to the end of the current page. */
        l = (page + TARGET_PAGE_SIZE) - addr;
        if (l > len)
            l = len;
        flags = page_get_flags(page);
        if (!(flags & PAGE_VALID))
            return -1;
        if (is_write) {
            if (!(flags & PAGE_WRITE))
                return -1;
            /* XXX: this code should not depend on lock_user */
            if (!(p = lock_user(VERIFY_WRITE, addr, l, 0)))
                return -1;
            memcpy(p, buf, l);
            unlock_user(p, addr, l);
        } else {
            if (!(flags & PAGE_READ))
                return -1;
            /* XXX: this code should not depend on lock_user */
            if (!(p = lock_user(VERIFY_READ, addr, l, 1)))
                return -1;
            memcpy(buf, p, l);
            unlock_user(p, addr, 0);
        }
        len -= l;
        buf += l;
        addr += l;
    }
    return 0;
}
34548df1cd07Sbellard 
345513eb76e0Sbellard #else
/* Copy 'len' bytes between the guest physical address range starting at
 * 'addr' and the host buffer 'buf' ('is_write' selects the direction).
 * RAM pages are accessed with memcpy (with TB invalidation and dirty
 * tracking on writes); everything else is split into the widest
 * naturally aligned 4/2/1-byte accesses and routed through the io_mem
 * handler tables. */
void cpu_physical_memory_rw(target_phys_addr_t addr, uint8_t *buf,
                            int len, int is_write)
{
    int l, io_index;
    uint8_t *ptr;
    uint32_t val;
    target_phys_addr_t page;
    unsigned long pd;
    PhysPageDesc *p;

    while (len > 0) {
        page = addr & TARGET_PAGE_MASK;
        /* Clamp this iteration to the end of the current page. */
        l = (page + TARGET_PAGE_SIZE) - addr;
        if (l > len)
            l = len;
        p = phys_page_find(page >> TARGET_PAGE_BITS);
        if (!p) {
            pd = IO_MEM_UNASSIGNED;
        } else {
            pd = p->phys_offset;
        }

        if (is_write) {
            if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
                /* I/O case: dispatch through the handler tables. */
                target_phys_addr_t addr1 = addr;
                io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
                if (p)
                    addr1 = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
                /* XXX: could force cpu_single_env to NULL to avoid
                   potential bugs */
                if (l >= 4 && ((addr1 & 3) == 0)) {
                    /* 32 bit write access */
                    val = ldl_p(buf);
                    io_mem_write[io_index][2](io_mem_opaque[io_index], addr1, val);
                    l = 4;
                } else if (l >= 2 && ((addr1 & 1) == 0)) {
                    /* 16 bit write access */
                    val = lduw_p(buf);
                    io_mem_write[io_index][1](io_mem_opaque[io_index], addr1, val);
                    l = 2;
                } else {
                    /* 8 bit write access */
                    val = ldub_p(buf);
                    io_mem_write[io_index][0](io_mem_opaque[io_index], addr1, val);
                    l = 1;
                }
            } else {
                unsigned long addr1;
                addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
                /* RAM case */
                ptr = qemu_get_ram_ptr(addr1);
                memcpy(ptr, buf, l);
                if (!cpu_physical_memory_is_dirty(addr1)) {
                    /* invalidate code */
                    tb_invalidate_phys_page_range(addr1, addr1 + l, 0);
                    /* set dirty bit */
                    cpu_physical_memory_set_dirty_flags(
                        addr1, (0xff & ~CODE_DIRTY_FLAG));
                }
            }
        } else {
            if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
                !(pd & IO_MEM_ROMD)) {
                target_phys_addr_t addr1 = addr;
                /* I/O case */
                io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
                if (p)
                    addr1 = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
                if (l >= 4 && ((addr1 & 3) == 0)) {
                    /* 32 bit read access */
                    val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr1);
                    stl_p(buf, val);
                    l = 4;
                } else if (l >= 2 && ((addr1 & 1) == 0)) {
                    /* 16 bit read access */
                    val = io_mem_read[io_index][1](io_mem_opaque[io_index], addr1);
                    stw_p(buf, val);
                    l = 2;
                } else {
                    /* 8 bit read access */
                    val = io_mem_read[io_index][0](io_mem_opaque[io_index], addr1);
                    stb_p(buf, val);
                    l = 1;
                }
            } else {
                /* RAM case (includes ROM and ROMD pages on reads) */
                ptr = qemu_get_ram_ptr(pd & TARGET_PAGE_MASK) +
                    (addr & ~TARGET_PAGE_MASK);
                memcpy(buf, ptr, l);
            }
        }
        len -= l;
        buf += l;
        addr += l;
    }
}
35528df1cd07Sbellard 
/* used for ROM loading : can write in RAM and ROM */
/* Like a physical-memory write, but also stores into ROM/ROMD pages.
 * Accesses to anything that is not RAM/ROM-backed are silently skipped,
 * and no dirty tracking or TB invalidation is performed. */
void cpu_physical_memory_write_rom(target_phys_addr_t addr,
                                   const uint8_t *buf, int len)
{
    int l;
    uint8_t *ptr;
    target_phys_addr_t page;
    unsigned long pd;
    PhysPageDesc *p;

    while (len > 0) {
        page = addr & TARGET_PAGE_MASK;
        /* Clamp this iteration to the end of the current page. */
        l = (page + TARGET_PAGE_SIZE) - addr;
        if (l > len)
            l = len;
        p = phys_page_find(page >> TARGET_PAGE_BITS);
        if (!p) {
            pd = IO_MEM_UNASSIGNED;
        } else {
            pd = p->phys_offset;
        }

        if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM &&
            (pd & ~TARGET_PAGE_MASK) != IO_MEM_ROM &&
            !(pd & IO_MEM_ROMD)) {
            /* do nothing */
        } else {
            unsigned long addr1;
            addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
            /* ROM/RAM case */
            ptr = qemu_get_ram_ptr(addr1);
            memcpy(ptr, buf, l);
        }
        len -= l;
        buf += l;
        addr += l;
    }
}
3591d0ecd2aaSbellard 
/* Single global bounce buffer.  NOTE(review): presumably used by the
   physical-memory map/unmap code when a region cannot be addressed
   directly -- the using code is below/out of view; confirm there. */
typedef struct {
    void *buffer;             /* host memory backing the mapping */
    target_phys_addr_t addr;  /* guest physical address covered */
    target_phys_addr_t len;   /* length of the covered region */
} BounceBuffer;

static BounceBuffer bounce;
35996d16c2f8Saliguori 
/* A client waiting to be told when retrying cpu_physical_memory_map()
   is likely to succeed (see cpu_register_map_client()). */
typedef struct MapClient {
    void *opaque;                    /* argument handed back to callback */
    void (*callback)(void *opaque);  /* invoked by cpu_notify_map_clients() */
    QLIST_ENTRY(MapClient) link;
} MapClient;

/* List of all currently registered map clients. */
static QLIST_HEAD(map_client_list, MapClient) map_client_list
    = QLIST_HEAD_INITIALIZER(map_client_list);
3608ba223c29Saliguori 
/* Register a callback to be invoked by cpu_notify_map_clients().
 * Returns an opaque handle for cpu_unregister_map_client().  The
 * allocated entry is owned by this module and freed on unregistration
 * (which notification also performs). */
void *cpu_register_map_client(void *opaque, void (*callback)(void *opaque))
{
    MapClient *client = qemu_malloc(sizeof(*client));

    client->opaque = opaque;
    client->callback = callback;
    QLIST_INSERT_HEAD(&map_client_list, client, link);
    return client;
}
3618ba223c29Saliguori 
/* Remove and free a client previously added with
   cpu_register_map_client(); '_client' is the handle it returned. */
void cpu_unregister_map_client(void *_client)
{
    MapClient *client = (MapClient *)_client;

    QLIST_REMOVE(client, link);
    qemu_free(client);
}
3626ba223c29Saliguori 
/* Invoke every registered map-client callback, unregistering (and
   freeing) each entry after it has been notified. */
static void cpu_notify_map_clients(void)
{
    MapClient *client;

    while (!QLIST_EMPTY(&map_client_list)) {
        client = QLIST_FIRST(&map_client_list);
        client->callback(client->opaque);
        cpu_unregister_map_client(client);
    }
}
3637ba223c29Saliguori 
36386d16c2f8Saliguori /* Map a physical memory region into a host virtual address.
36396d16c2f8Saliguori  * May map a subset of the requested range, given by and returned in *plen.
36406d16c2f8Saliguori  * May return NULL if resources needed to perform the mapping are exhausted.
36416d16c2f8Saliguori  * Use only for reads OR writes - not for read-modify-write operations.
3642ba223c29Saliguori  * Use cpu_register_map_client() to know when retrying the map operation is
3643ba223c29Saliguori  * likely to succeed.
36446d16c2f8Saliguori  */
3645c227f099SAnthony Liguori void *cpu_physical_memory_map(target_phys_addr_t addr,
3646c227f099SAnthony Liguori                               target_phys_addr_t *plen,
36476d16c2f8Saliguori                               int is_write)
36486d16c2f8Saliguori {
3649c227f099SAnthony Liguori     target_phys_addr_t len = *plen;
3650c227f099SAnthony Liguori     target_phys_addr_t done = 0;
36516d16c2f8Saliguori     int l;
36526d16c2f8Saliguori     uint8_t *ret = NULL;
36536d16c2f8Saliguori     uint8_t *ptr;
3654c227f099SAnthony Liguori     target_phys_addr_t page;
36556d16c2f8Saliguori     unsigned long pd;
36566d16c2f8Saliguori     PhysPageDesc *p;
36576d16c2f8Saliguori     unsigned long addr1;
36586d16c2f8Saliguori 
36596d16c2f8Saliguori     while (len > 0) {
36606d16c2f8Saliguori         page = addr & TARGET_PAGE_MASK;
36616d16c2f8Saliguori         l = (page + TARGET_PAGE_SIZE) - addr;
36626d16c2f8Saliguori         if (l > len)
36636d16c2f8Saliguori             l = len;
36646d16c2f8Saliguori         p = phys_page_find(page >> TARGET_PAGE_BITS);
36656d16c2f8Saliguori         if (!p) {
36666d16c2f8Saliguori             pd = IO_MEM_UNASSIGNED;
36676d16c2f8Saliguori         } else {
36686d16c2f8Saliguori             pd = p->phys_offset;
36696d16c2f8Saliguori         }
36706d16c2f8Saliguori 
36716d16c2f8Saliguori         if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
36726d16c2f8Saliguori             if (done || bounce.buffer) {
36736d16c2f8Saliguori                 break;
36746d16c2f8Saliguori             }
36756d16c2f8Saliguori             bounce.buffer = qemu_memalign(TARGET_PAGE_SIZE, TARGET_PAGE_SIZE);
36766d16c2f8Saliguori             bounce.addr = addr;
36776d16c2f8Saliguori             bounce.len = l;
36786d16c2f8Saliguori             if (!is_write) {
36796d16c2f8Saliguori                 cpu_physical_memory_rw(addr, bounce.buffer, l, 0);
36806d16c2f8Saliguori             }
36816d16c2f8Saliguori             ptr = bounce.buffer;
36826d16c2f8Saliguori         } else {
36836d16c2f8Saliguori             addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
36845579c7f3Spbrook             ptr = qemu_get_ram_ptr(addr1);
36856d16c2f8Saliguori         }
36866d16c2f8Saliguori         if (!done) {
36876d16c2f8Saliguori             ret = ptr;
36886d16c2f8Saliguori         } else if (ret + done != ptr) {
36896d16c2f8Saliguori             break;
36906d16c2f8Saliguori         }
36916d16c2f8Saliguori 
36926d16c2f8Saliguori         len -= l;
36936d16c2f8Saliguori         addr += l;
36946d16c2f8Saliguori         done += l;
36956d16c2f8Saliguori     }
36966d16c2f8Saliguori     *plen = done;
36976d16c2f8Saliguori     return ret;
36986d16c2f8Saliguori }
36996d16c2f8Saliguori 
37006d16c2f8Saliguori /* Unmaps a memory region previously mapped by cpu_physical_memory_map().
37016d16c2f8Saliguori  * Will also mark the memory as dirty if is_write == 1.  access_len gives
37026d16c2f8Saliguori  * the amount of memory that was actually read or written by the caller.
37036d16c2f8Saliguori  */
3704c227f099SAnthony Liguori void cpu_physical_memory_unmap(void *buffer, target_phys_addr_t len,
3705c227f099SAnthony Liguori                                int is_write, target_phys_addr_t access_len)
37066d16c2f8Saliguori {
37076d16c2f8Saliguori     if (buffer != bounce.buffer) {
37086d16c2f8Saliguori         if (is_write) {
3709c227f099SAnthony Liguori             ram_addr_t addr1 = qemu_ram_addr_from_host(buffer);
37106d16c2f8Saliguori             while (access_len) {
37116d16c2f8Saliguori                 unsigned l;
37126d16c2f8Saliguori                 l = TARGET_PAGE_SIZE;
37136d16c2f8Saliguori                 if (l > access_len)
37146d16c2f8Saliguori                     l = access_len;
37156d16c2f8Saliguori                 if (!cpu_physical_memory_is_dirty(addr1)) {
37166d16c2f8Saliguori                     /* invalidate code */
37176d16c2f8Saliguori                     tb_invalidate_phys_page_range(addr1, addr1 + l, 0);
37186d16c2f8Saliguori                     /* set dirty bit */
3719f7c11b53SYoshiaki Tamura                     cpu_physical_memory_set_dirty_flags(
3720f7c11b53SYoshiaki Tamura                         addr1, (0xff & ~CODE_DIRTY_FLAG));
37216d16c2f8Saliguori                 }
37226d16c2f8Saliguori                 addr1 += l;
37236d16c2f8Saliguori                 access_len -= l;
37246d16c2f8Saliguori             }
37256d16c2f8Saliguori         }
37266d16c2f8Saliguori         return;
37276d16c2f8Saliguori     }
37286d16c2f8Saliguori     if (is_write) {
37296d16c2f8Saliguori         cpu_physical_memory_write(bounce.addr, bounce.buffer, access_len);
37306d16c2f8Saliguori     }
3731f8a83245SHerve Poussineau     qemu_vfree(bounce.buffer);
37326d16c2f8Saliguori     bounce.buffer = NULL;
3733ba223c29Saliguori     cpu_notify_map_clients();
37346d16c2f8Saliguori }
3735d0ecd2aaSbellard 
37368df1cd07Sbellard /* warning: addr must be aligned */
3737c227f099SAnthony Liguori uint32_t ldl_phys(target_phys_addr_t addr)
37388df1cd07Sbellard {
37398df1cd07Sbellard     int io_index;
37408df1cd07Sbellard     uint8_t *ptr;
37418df1cd07Sbellard     uint32_t val;
37428df1cd07Sbellard     unsigned long pd;
37438df1cd07Sbellard     PhysPageDesc *p;
37448df1cd07Sbellard 
37458df1cd07Sbellard     p = phys_page_find(addr >> TARGET_PAGE_BITS);
37468df1cd07Sbellard     if (!p) {
37478df1cd07Sbellard         pd = IO_MEM_UNASSIGNED;
37488df1cd07Sbellard     } else {
37498df1cd07Sbellard         pd = p->phys_offset;
37508df1cd07Sbellard     }
37518df1cd07Sbellard 
37522a4188a3Sbellard     if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
37532a4188a3Sbellard         !(pd & IO_MEM_ROMD)) {
37548df1cd07Sbellard         /* I/O case */
37558df1cd07Sbellard         io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
37568da3ff18Spbrook         if (p)
37578da3ff18Spbrook             addr = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
37588df1cd07Sbellard         val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr);
37598df1cd07Sbellard     } else {
37608df1cd07Sbellard         /* RAM case */
37615579c7f3Spbrook         ptr = qemu_get_ram_ptr(pd & TARGET_PAGE_MASK) +
37628df1cd07Sbellard             (addr & ~TARGET_PAGE_MASK);
37638df1cd07Sbellard         val = ldl_p(ptr);
37648df1cd07Sbellard     }
37658df1cd07Sbellard     return val;
37668df1cd07Sbellard }
37678df1cd07Sbellard 
376884b7b8e7Sbellard /* warning: addr must be aligned */
3769c227f099SAnthony Liguori uint64_t ldq_phys(target_phys_addr_t addr)
377084b7b8e7Sbellard {
377184b7b8e7Sbellard     int io_index;
377284b7b8e7Sbellard     uint8_t *ptr;
377384b7b8e7Sbellard     uint64_t val;
377484b7b8e7Sbellard     unsigned long pd;
377584b7b8e7Sbellard     PhysPageDesc *p;
377684b7b8e7Sbellard 
377784b7b8e7Sbellard     p = phys_page_find(addr >> TARGET_PAGE_BITS);
377884b7b8e7Sbellard     if (!p) {
377984b7b8e7Sbellard         pd = IO_MEM_UNASSIGNED;
378084b7b8e7Sbellard     } else {
378184b7b8e7Sbellard         pd = p->phys_offset;
378284b7b8e7Sbellard     }
378384b7b8e7Sbellard 
37842a4188a3Sbellard     if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
37852a4188a3Sbellard         !(pd & IO_MEM_ROMD)) {
378684b7b8e7Sbellard         /* I/O case */
378784b7b8e7Sbellard         io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
37888da3ff18Spbrook         if (p)
37898da3ff18Spbrook             addr = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
379084b7b8e7Sbellard #ifdef TARGET_WORDS_BIGENDIAN
379184b7b8e7Sbellard         val = (uint64_t)io_mem_read[io_index][2](io_mem_opaque[io_index], addr) << 32;
379284b7b8e7Sbellard         val |= io_mem_read[io_index][2](io_mem_opaque[io_index], addr + 4);
379384b7b8e7Sbellard #else
379484b7b8e7Sbellard         val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr);
379584b7b8e7Sbellard         val |= (uint64_t)io_mem_read[io_index][2](io_mem_opaque[io_index], addr + 4) << 32;
379684b7b8e7Sbellard #endif
379784b7b8e7Sbellard     } else {
379884b7b8e7Sbellard         /* RAM case */
37995579c7f3Spbrook         ptr = qemu_get_ram_ptr(pd & TARGET_PAGE_MASK) +
380084b7b8e7Sbellard             (addr & ~TARGET_PAGE_MASK);
380184b7b8e7Sbellard         val = ldq_p(ptr);
380284b7b8e7Sbellard     }
380384b7b8e7Sbellard     return val;
380484b7b8e7Sbellard }
380584b7b8e7Sbellard 
3806aab33094Sbellard /* XXX: optimize */
3807c227f099SAnthony Liguori uint32_t ldub_phys(target_phys_addr_t addr)
3808aab33094Sbellard {
3809aab33094Sbellard     uint8_t val;
3810aab33094Sbellard     cpu_physical_memory_read(addr, &val, 1);
3811aab33094Sbellard     return val;
3812aab33094Sbellard }
3813aab33094Sbellard 
3814733f0b02SMichael S. Tsirkin /* warning: addr must be aligned */
3815c227f099SAnthony Liguori uint32_t lduw_phys(target_phys_addr_t addr)
3816aab33094Sbellard {
3817733f0b02SMichael S. Tsirkin     int io_index;
3818733f0b02SMichael S. Tsirkin     uint8_t *ptr;
3819733f0b02SMichael S. Tsirkin     uint64_t val;
3820733f0b02SMichael S. Tsirkin     unsigned long pd;
3821733f0b02SMichael S. Tsirkin     PhysPageDesc *p;
3822733f0b02SMichael S. Tsirkin 
3823733f0b02SMichael S. Tsirkin     p = phys_page_find(addr >> TARGET_PAGE_BITS);
3824733f0b02SMichael S. Tsirkin     if (!p) {
3825733f0b02SMichael S. Tsirkin         pd = IO_MEM_UNASSIGNED;
3826733f0b02SMichael S. Tsirkin     } else {
3827733f0b02SMichael S. Tsirkin         pd = p->phys_offset;
3828733f0b02SMichael S. Tsirkin     }
3829733f0b02SMichael S. Tsirkin 
3830733f0b02SMichael S. Tsirkin     if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
3831733f0b02SMichael S. Tsirkin         !(pd & IO_MEM_ROMD)) {
3832733f0b02SMichael S. Tsirkin         /* I/O case */
3833733f0b02SMichael S. Tsirkin         io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
3834733f0b02SMichael S. Tsirkin         if (p)
3835733f0b02SMichael S. Tsirkin             addr = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
3836733f0b02SMichael S. Tsirkin         val = io_mem_read[io_index][1](io_mem_opaque[io_index], addr);
3837733f0b02SMichael S. Tsirkin     } else {
3838733f0b02SMichael S. Tsirkin         /* RAM case */
3839733f0b02SMichael S. Tsirkin         ptr = qemu_get_ram_ptr(pd & TARGET_PAGE_MASK) +
3840733f0b02SMichael S. Tsirkin             (addr & ~TARGET_PAGE_MASK);
3841733f0b02SMichael S. Tsirkin         val = lduw_p(ptr);
3842733f0b02SMichael S. Tsirkin     }
3843733f0b02SMichael S. Tsirkin     return val;
3844aab33094Sbellard }
3845aab33094Sbellard 
38468df1cd07Sbellard /* warning: addr must be aligned. The ram page is not masked as dirty
38478df1cd07Sbellard    and the code inside is not invalidated. It is useful if the dirty
38488df1cd07Sbellard    bits are used to track modified PTEs */
3849c227f099SAnthony Liguori void stl_phys_notdirty(target_phys_addr_t addr, uint32_t val)
38508df1cd07Sbellard {
38518df1cd07Sbellard     int io_index;
38528df1cd07Sbellard     uint8_t *ptr;
38538df1cd07Sbellard     unsigned long pd;
38548df1cd07Sbellard     PhysPageDesc *p;
38558df1cd07Sbellard 
38568df1cd07Sbellard     p = phys_page_find(addr >> TARGET_PAGE_BITS);
38578df1cd07Sbellard     if (!p) {
38588df1cd07Sbellard         pd = IO_MEM_UNASSIGNED;
38598df1cd07Sbellard     } else {
38608df1cd07Sbellard         pd = p->phys_offset;
38618df1cd07Sbellard     }
38628df1cd07Sbellard 
38633a7d929eSbellard     if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
38648df1cd07Sbellard         io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
38658da3ff18Spbrook         if (p)
38668da3ff18Spbrook             addr = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
38678df1cd07Sbellard         io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
38688df1cd07Sbellard     } else {
386974576198Saliguori         unsigned long addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
38705579c7f3Spbrook         ptr = qemu_get_ram_ptr(addr1);
38718df1cd07Sbellard         stl_p(ptr, val);
387274576198Saliguori 
387374576198Saliguori         if (unlikely(in_migration)) {
387474576198Saliguori             if (!cpu_physical_memory_is_dirty(addr1)) {
387574576198Saliguori                 /* invalidate code */
387674576198Saliguori                 tb_invalidate_phys_page_range(addr1, addr1 + 4, 0);
387774576198Saliguori                 /* set dirty bit */
3878f7c11b53SYoshiaki Tamura                 cpu_physical_memory_set_dirty_flags(
3879f7c11b53SYoshiaki Tamura                     addr1, (0xff & ~CODE_DIRTY_FLAG));
388074576198Saliguori             }
388174576198Saliguori         }
38828df1cd07Sbellard     }
38838df1cd07Sbellard }
38848df1cd07Sbellard 
3885c227f099SAnthony Liguori void stq_phys_notdirty(target_phys_addr_t addr, uint64_t val)
3886bc98a7efSj_mayer {
3887bc98a7efSj_mayer     int io_index;
3888bc98a7efSj_mayer     uint8_t *ptr;
3889bc98a7efSj_mayer     unsigned long pd;
3890bc98a7efSj_mayer     PhysPageDesc *p;
3891bc98a7efSj_mayer 
3892bc98a7efSj_mayer     p = phys_page_find(addr >> TARGET_PAGE_BITS);
3893bc98a7efSj_mayer     if (!p) {
3894bc98a7efSj_mayer         pd = IO_MEM_UNASSIGNED;
3895bc98a7efSj_mayer     } else {
3896bc98a7efSj_mayer         pd = p->phys_offset;
3897bc98a7efSj_mayer     }
3898bc98a7efSj_mayer 
3899bc98a7efSj_mayer     if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
3900bc98a7efSj_mayer         io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
39018da3ff18Spbrook         if (p)
39028da3ff18Spbrook             addr = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
3903bc98a7efSj_mayer #ifdef TARGET_WORDS_BIGENDIAN
3904bc98a7efSj_mayer         io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val >> 32);
3905bc98a7efSj_mayer         io_mem_write[io_index][2](io_mem_opaque[io_index], addr + 4, val);
3906bc98a7efSj_mayer #else
3907bc98a7efSj_mayer         io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
3908bc98a7efSj_mayer         io_mem_write[io_index][2](io_mem_opaque[io_index], addr + 4, val >> 32);
3909bc98a7efSj_mayer #endif
3910bc98a7efSj_mayer     } else {
39115579c7f3Spbrook         ptr = qemu_get_ram_ptr(pd & TARGET_PAGE_MASK) +
3912bc98a7efSj_mayer             (addr & ~TARGET_PAGE_MASK);
3913bc98a7efSj_mayer         stq_p(ptr, val);
3914bc98a7efSj_mayer     }
3915bc98a7efSj_mayer }
3916bc98a7efSj_mayer 
39178df1cd07Sbellard /* warning: addr must be aligned */
3918c227f099SAnthony Liguori void stl_phys(target_phys_addr_t addr, uint32_t val)
39198df1cd07Sbellard {
39208df1cd07Sbellard     int io_index;
39218df1cd07Sbellard     uint8_t *ptr;
39228df1cd07Sbellard     unsigned long pd;
39238df1cd07Sbellard     PhysPageDesc *p;
39248df1cd07Sbellard 
39258df1cd07Sbellard     p = phys_page_find(addr >> TARGET_PAGE_BITS);
39268df1cd07Sbellard     if (!p) {
39278df1cd07Sbellard         pd = IO_MEM_UNASSIGNED;
39288df1cd07Sbellard     } else {
39298df1cd07Sbellard         pd = p->phys_offset;
39308df1cd07Sbellard     }
39318df1cd07Sbellard 
39323a7d929eSbellard     if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
39338df1cd07Sbellard         io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
39348da3ff18Spbrook         if (p)
39358da3ff18Spbrook             addr = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
39368df1cd07Sbellard         io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
39378df1cd07Sbellard     } else {
39388df1cd07Sbellard         unsigned long addr1;
39398df1cd07Sbellard         addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
39408df1cd07Sbellard         /* RAM case */
39415579c7f3Spbrook         ptr = qemu_get_ram_ptr(addr1);
39428df1cd07Sbellard         stl_p(ptr, val);
39433a7d929eSbellard         if (!cpu_physical_memory_is_dirty(addr1)) {
39448df1cd07Sbellard             /* invalidate code */
39458df1cd07Sbellard             tb_invalidate_phys_page_range(addr1, addr1 + 4, 0);
39468df1cd07Sbellard             /* set dirty bit */
3947f7c11b53SYoshiaki Tamura             cpu_physical_memory_set_dirty_flags(addr1,
3948f7c11b53SYoshiaki Tamura                 (0xff & ~CODE_DIRTY_FLAG));
39498df1cd07Sbellard         }
39508df1cd07Sbellard     }
39513a7d929eSbellard }
39528df1cd07Sbellard 
3953aab33094Sbellard /* XXX: optimize */
3954c227f099SAnthony Liguori void stb_phys(target_phys_addr_t addr, uint32_t val)
3955aab33094Sbellard {
3956aab33094Sbellard     uint8_t v = val;
3957aab33094Sbellard     cpu_physical_memory_write(addr, &v, 1);
3958aab33094Sbellard }
3959aab33094Sbellard 
3960733f0b02SMichael S. Tsirkin /* warning: addr must be aligned */
3961c227f099SAnthony Liguori void stw_phys(target_phys_addr_t addr, uint32_t val)
3962aab33094Sbellard {
3963733f0b02SMichael S. Tsirkin     int io_index;
3964733f0b02SMichael S. Tsirkin     uint8_t *ptr;
3965733f0b02SMichael S. Tsirkin     unsigned long pd;
3966733f0b02SMichael S. Tsirkin     PhysPageDesc *p;
3967733f0b02SMichael S. Tsirkin 
3968733f0b02SMichael S. Tsirkin     p = phys_page_find(addr >> TARGET_PAGE_BITS);
3969733f0b02SMichael S. Tsirkin     if (!p) {
3970733f0b02SMichael S. Tsirkin         pd = IO_MEM_UNASSIGNED;
3971733f0b02SMichael S. Tsirkin     } else {
3972733f0b02SMichael S. Tsirkin         pd = p->phys_offset;
3973733f0b02SMichael S. Tsirkin     }
3974733f0b02SMichael S. Tsirkin 
3975733f0b02SMichael S. Tsirkin     if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
3976733f0b02SMichael S. Tsirkin         io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
3977733f0b02SMichael S. Tsirkin         if (p)
3978733f0b02SMichael S. Tsirkin             addr = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
3979733f0b02SMichael S. Tsirkin         io_mem_write[io_index][1](io_mem_opaque[io_index], addr, val);
3980733f0b02SMichael S. Tsirkin     } else {
3981733f0b02SMichael S. Tsirkin         unsigned long addr1;
3982733f0b02SMichael S. Tsirkin         addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
3983733f0b02SMichael S. Tsirkin         /* RAM case */
3984733f0b02SMichael S. Tsirkin         ptr = qemu_get_ram_ptr(addr1);
3985733f0b02SMichael S. Tsirkin         stw_p(ptr, val);
3986733f0b02SMichael S. Tsirkin         if (!cpu_physical_memory_is_dirty(addr1)) {
3987733f0b02SMichael S. Tsirkin             /* invalidate code */
3988733f0b02SMichael S. Tsirkin             tb_invalidate_phys_page_range(addr1, addr1 + 2, 0);
3989733f0b02SMichael S. Tsirkin             /* set dirty bit */
3990733f0b02SMichael S. Tsirkin             cpu_physical_memory_set_dirty_flags(addr1,
3991733f0b02SMichael S. Tsirkin                 (0xff & ~CODE_DIRTY_FLAG));
3992733f0b02SMichael S. Tsirkin         }
3993733f0b02SMichael S. Tsirkin     }
3994aab33094Sbellard }
3995aab33094Sbellard 
3996aab33094Sbellard /* XXX: optimize */
3997c227f099SAnthony Liguori void stq_phys(target_phys_addr_t addr, uint64_t val)
3998aab33094Sbellard {
3999aab33094Sbellard     val = tswap64(val);
4000aab33094Sbellard     cpu_physical_memory_write(addr, (const uint8_t *)&val, 8);
4001aab33094Sbellard }
4002aab33094Sbellard 
40035e2972fdSaliguori /* virtual memory access for debug (includes writing to ROM) */
4004b448f2f3Sbellard int cpu_memory_rw_debug(CPUState *env, target_ulong addr,
4005b448f2f3Sbellard                         uint8_t *buf, int len, int is_write)
400613eb76e0Sbellard {
400713eb76e0Sbellard     int l;
4008c227f099SAnthony Liguori     target_phys_addr_t phys_addr;
40099b3c35e0Sj_mayer     target_ulong page;
401013eb76e0Sbellard 
401113eb76e0Sbellard     while (len > 0) {
401213eb76e0Sbellard         page = addr & TARGET_PAGE_MASK;
401313eb76e0Sbellard         phys_addr = cpu_get_phys_page_debug(env, page);
401413eb76e0Sbellard         /* if no physical page mapped, return an error */
401513eb76e0Sbellard         if (phys_addr == -1)
401613eb76e0Sbellard             return -1;
401713eb76e0Sbellard         l = (page + TARGET_PAGE_SIZE) - addr;
401813eb76e0Sbellard         if (l > len)
401913eb76e0Sbellard             l = len;
40205e2972fdSaliguori         phys_addr += (addr & ~TARGET_PAGE_MASK);
40215e2972fdSaliguori         if (is_write)
40225e2972fdSaliguori             cpu_physical_memory_write_rom(phys_addr, buf, l);
40235e2972fdSaliguori         else
40245e2972fdSaliguori             cpu_physical_memory_rw(phys_addr, buf, l, is_write);
402513eb76e0Sbellard         len -= l;
402613eb76e0Sbellard         buf += l;
402713eb76e0Sbellard         addr += l;
402813eb76e0Sbellard     }
402913eb76e0Sbellard     return 0;
403013eb76e0Sbellard }
4031a68fe89cSPaul Brook #endif
403213eb76e0Sbellard 
40332e70f6efSpbrook /* in deterministic execution mode, instructions doing device I/Os
40342e70f6efSpbrook    must be at the end of the TB */
40352e70f6efSpbrook void cpu_io_recompile(CPUState *env, void *retaddr)
40362e70f6efSpbrook {
40372e70f6efSpbrook     TranslationBlock *tb;
40382e70f6efSpbrook     uint32_t n, cflags;
40392e70f6efSpbrook     target_ulong pc, cs_base;
40402e70f6efSpbrook     uint64_t flags;
40412e70f6efSpbrook 
40422e70f6efSpbrook     tb = tb_find_pc((unsigned long)retaddr);
40432e70f6efSpbrook     if (!tb) {
40442e70f6efSpbrook         cpu_abort(env, "cpu_io_recompile: could not find TB for pc=%p",
40452e70f6efSpbrook                   retaddr);
40462e70f6efSpbrook     }
40472e70f6efSpbrook     n = env->icount_decr.u16.low + tb->icount;
40482e70f6efSpbrook     cpu_restore_state(tb, env, (unsigned long)retaddr, NULL);
40492e70f6efSpbrook     /* Calculate how many instructions had been executed before the fault
4050bf20dc07Sths        occurred.  */
40512e70f6efSpbrook     n = n - env->icount_decr.u16.low;
40522e70f6efSpbrook     /* Generate a new TB ending on the I/O insn.  */
40532e70f6efSpbrook     n++;
40542e70f6efSpbrook     /* On MIPS and SH, delay slot instructions can only be restarted if
40552e70f6efSpbrook        they were already the first instruction in the TB.  If this is not
4056bf20dc07Sths        the first instruction in a TB then re-execute the preceding
40572e70f6efSpbrook        branch.  */
40582e70f6efSpbrook #if defined(TARGET_MIPS)
40592e70f6efSpbrook     if ((env->hflags & MIPS_HFLAG_BMASK) != 0 && n > 1) {
40602e70f6efSpbrook         env->active_tc.PC -= 4;
40612e70f6efSpbrook         env->icount_decr.u16.low++;
40622e70f6efSpbrook         env->hflags &= ~MIPS_HFLAG_BMASK;
40632e70f6efSpbrook     }
40642e70f6efSpbrook #elif defined(TARGET_SH4)
40652e70f6efSpbrook     if ((env->flags & ((DELAY_SLOT | DELAY_SLOT_CONDITIONAL))) != 0
40662e70f6efSpbrook             && n > 1) {
40672e70f6efSpbrook         env->pc -= 2;
40682e70f6efSpbrook         env->icount_decr.u16.low++;
40692e70f6efSpbrook         env->flags &= ~(DELAY_SLOT | DELAY_SLOT_CONDITIONAL);
40702e70f6efSpbrook     }
40712e70f6efSpbrook #endif
40722e70f6efSpbrook     /* This should never happen.  */
40732e70f6efSpbrook     if (n > CF_COUNT_MASK)
40742e70f6efSpbrook         cpu_abort(env, "TB too big during recompile");
40752e70f6efSpbrook 
40762e70f6efSpbrook     cflags = n | CF_LAST_IO;
40772e70f6efSpbrook     pc = tb->pc;
40782e70f6efSpbrook     cs_base = tb->cs_base;
40792e70f6efSpbrook     flags = tb->flags;
40802e70f6efSpbrook     tb_phys_invalidate(tb, -1);
40812e70f6efSpbrook     /* FIXME: In theory this could raise an exception.  In practice
40822e70f6efSpbrook        we have already translated the block once so it's probably ok.  */
40832e70f6efSpbrook     tb_gen_code(env, pc, cs_base, flags, cflags);
4084bf20dc07Sths     /* TODO: If env->pc != tb->pc (i.e. the faulting instruction was not
40852e70f6efSpbrook        the first in the TB) then we end up generating a whole new TB and
40862e70f6efSpbrook        repeating the fault, which is horribly inefficient.
40872e70f6efSpbrook        Better would be to execute just this insn uncached, or generate a
40882e70f6efSpbrook        second new TB.  */
40892e70f6efSpbrook     cpu_resume_from_signal(env, NULL);
40902e70f6efSpbrook }
40912e70f6efSpbrook 
4092b3755a91SPaul Brook #if !defined(CONFIG_USER_ONLY)
4093b3755a91SPaul Brook 
4094e3db7226Sbellard void dump_exec_info(FILE *f,
4095e3db7226Sbellard                     int (*cpu_fprintf)(FILE *f, const char *fmt, ...))
4096e3db7226Sbellard {
4097e3db7226Sbellard     int i, target_code_size, max_target_code_size;
4098e3db7226Sbellard     int direct_jmp_count, direct_jmp2_count, cross_page;
4099e3db7226Sbellard     TranslationBlock *tb;
4100e3db7226Sbellard 
4101e3db7226Sbellard     target_code_size = 0;
4102e3db7226Sbellard     max_target_code_size = 0;
4103e3db7226Sbellard     cross_page = 0;
4104e3db7226Sbellard     direct_jmp_count = 0;
4105e3db7226Sbellard     direct_jmp2_count = 0;
4106e3db7226Sbellard     for(i = 0; i < nb_tbs; i++) {
4107e3db7226Sbellard         tb = &tbs[i];
4108e3db7226Sbellard         target_code_size += tb->size;
4109e3db7226Sbellard         if (tb->size > max_target_code_size)
4110e3db7226Sbellard             max_target_code_size = tb->size;
4111e3db7226Sbellard         if (tb->page_addr[1] != -1)
4112e3db7226Sbellard             cross_page++;
4113e3db7226Sbellard         if (tb->tb_next_offset[0] != 0xffff) {
4114e3db7226Sbellard             direct_jmp_count++;
4115e3db7226Sbellard             if (tb->tb_next_offset[1] != 0xffff) {
4116e3db7226Sbellard                 direct_jmp2_count++;
4117e3db7226Sbellard             }
4118e3db7226Sbellard         }
4119e3db7226Sbellard     }
4120e3db7226Sbellard     /* XXX: avoid using doubles ? */
412157fec1feSbellard     cpu_fprintf(f, "Translation buffer state:\n");
412226a5f13bSbellard     cpu_fprintf(f, "gen code size       %ld/%ld\n",
412326a5f13bSbellard                 code_gen_ptr - code_gen_buffer, code_gen_buffer_max_size);
412426a5f13bSbellard     cpu_fprintf(f, "TB count            %d/%d\n",
412526a5f13bSbellard                 nb_tbs, code_gen_max_blocks);
4126e3db7226Sbellard     cpu_fprintf(f, "TB avg target size  %d max=%d bytes\n",
4127e3db7226Sbellard                 nb_tbs ? target_code_size / nb_tbs : 0,
4128e3db7226Sbellard                 max_target_code_size);
4129e3db7226Sbellard     cpu_fprintf(f, "TB avg host size    %d bytes (expansion ratio: %0.1f)\n",
4130e3db7226Sbellard                 nb_tbs ? (code_gen_ptr - code_gen_buffer) / nb_tbs : 0,
4131e3db7226Sbellard                 target_code_size ? (double) (code_gen_ptr - code_gen_buffer) / target_code_size : 0);
4132e3db7226Sbellard     cpu_fprintf(f, "cross page TB count %d (%d%%)\n",
4133e3db7226Sbellard             cross_page,
4134e3db7226Sbellard             nb_tbs ? (cross_page * 100) / nb_tbs : 0);
4135e3db7226Sbellard     cpu_fprintf(f, "direct jump count   %d (%d%%) (2 jumps=%d %d%%)\n",
4136e3db7226Sbellard                 direct_jmp_count,
4137e3db7226Sbellard                 nb_tbs ? (direct_jmp_count * 100) / nb_tbs : 0,
4138e3db7226Sbellard                 direct_jmp2_count,
4139e3db7226Sbellard                 nb_tbs ? (direct_jmp2_count * 100) / nb_tbs : 0);
414057fec1feSbellard     cpu_fprintf(f, "\nStatistics:\n");
4141e3db7226Sbellard     cpu_fprintf(f, "TB flush count      %d\n", tb_flush_count);
4142e3db7226Sbellard     cpu_fprintf(f, "TB invalidate count %d\n", tb_phys_invalidate_count);
4143e3db7226Sbellard     cpu_fprintf(f, "TLB flush count     %d\n", tlb_flush_count);
4144b67d9a52Sbellard     tcg_dump_info(f, cpu_fprintf);
4145e3db7226Sbellard }
4146e3db7226Sbellard 
414761382a50Sbellard #define MMUSUFFIX _cmmu
414861382a50Sbellard #define GETPC() NULL
414961382a50Sbellard #define env cpu_single_env
4150b769d8feSbellard #define SOFTMMU_CODE_ACCESS
415161382a50Sbellard 
415261382a50Sbellard #define SHIFT 0
415361382a50Sbellard #include "softmmu_template.h"
415461382a50Sbellard 
415561382a50Sbellard #define SHIFT 1
415661382a50Sbellard #include "softmmu_template.h"
415761382a50Sbellard 
415861382a50Sbellard #define SHIFT 2
415961382a50Sbellard #include "softmmu_template.h"
416061382a50Sbellard 
416161382a50Sbellard #define SHIFT 3
416261382a50Sbellard #include "softmmu_template.h"
416361382a50Sbellard 
416461382a50Sbellard #undef env
416561382a50Sbellard 
416661382a50Sbellard #endif
4167