xref: /qemu/system/physmem.c (revision f15fbc4bd1a24bd1477a846e63e62c6d435912f8)
/*
 *  virtual page mapping and translated block handling
 *
 *  Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
#include "config.h"
#ifdef _WIN32
#include <windows.h>
#else
#include <sys/types.h>
#include <sys/mman.h>
#endif

#include "qemu-common.h"
#include "cpu.h"
#include "tcg.h"
#include "hw/hw.h"
#include "hw/qdev.h"
#include "osdep.h"
#include "kvm.h"
#include "hw/xen.h"
#include "qemu-timer.h"
#if defined(CONFIG_USER_ONLY)
#include <qemu.h>
#if defined(__FreeBSD__) || defined(__FreeBSD_kernel__)
#include <sys/param.h>
#if __FreeBSD_version >= 700104
#define HAVE_KINFO_GETVMMAP
#define sigqueue sigqueue_freebsd  /* avoid redefinition */
#include <sys/time.h>
#include <sys/proc.h>
#include <machine/profile.h>
#define _KERNEL
#include <sys/user.h>
#undef _KERNEL
#undef sigqueue
#include <libutil.h>
#endif
#endif
#else /* !CONFIG_USER_ONLY */
#include "xen-mapcache.h"
#include "trace.h"
#endif

//#define DEBUG_TB_INVALIDATE
//#define DEBUG_FLUSH
//#define DEBUG_TLB
//#define DEBUG_UNASSIGNED

/* make various TB consistency checks */
//#define DEBUG_TB_CHECK
//#define DEBUG_TLB_CHECK

//#define DEBUG_IOPORT
//#define DEBUG_SUBPAGE

#if !defined(CONFIG_USER_ONLY)
/* TB consistency checks only implemented for usermode emulation.  */
#undef DEBUG_TB_CHECK
#endif

#define SMC_BITMAP_USE_THRESHOLD 10

static TranslationBlock *tbs;
static int code_gen_max_blocks;
TranslationBlock *tb_phys_hash[CODE_GEN_PHYS_HASH_SIZE];
static int nb_tbs;
/* any access to the tbs or the page table must use this lock */
spinlock_t tb_lock = SPIN_LOCK_UNLOCKED;

#if defined(__arm__) || defined(__sparc_v9__)
/* The prologue must be reachable with a direct jump.  ARM and Sparc64
   have limited branch ranges (possibly also PPC), so place it in a
   section close to the code segment. */
#define code_gen_section                                \
    __attribute__((__section__(".gen_code")))           \
    __attribute__((aligned (32)))
#elif defined(_WIN32)
/* Maximum alignment for Win32 is 16. */
#define code_gen_section                                \
    __attribute__((aligned (16)))
#else
#define code_gen_section                                \
    __attribute__((aligned (32)))
#endif

uint8_t code_gen_prologue[1024] code_gen_section;
static uint8_t *code_gen_buffer;
static unsigned long code_gen_buffer_size;
/* threshold to flush the translated code buffer */
static unsigned long code_gen_buffer_max_size;
static uint8_t *code_gen_ptr;

#if !defined(CONFIG_USER_ONLY)
int phys_ram_fd;
static int in_migration;

RAMList ram_list = { .blocks = QLIST_HEAD_INITIALIZER(ram_list) };
#endif

CPUState *first_cpu;
/* current CPU in the current thread. It is only valid inside
   cpu_exec() */
CPUState *cpu_single_env;
/* 0 = Do not count executed instructions.
   1 = Precise instruction counting.
   2 = Adaptive rate instruction counting.  */
int use_icount = 0;
/* Current instruction counter.  While executing translated code this may
   include some instructions that have not yet been executed.  */
int64_t qemu_icount;

typedef struct PageDesc {
    /* list of TBs intersecting this ram page */
    TranslationBlock *first_tb;
    /* in order to optimize self modifying code, we count the number
       of code write lookups to a given page and switch to a bitmap
       once a threshold is crossed */
    unsigned int code_write_count;
    uint8_t *code_bitmap;
#if defined(CONFIG_USER_ONLY)
    unsigned long flags;
#endif
} PageDesc;

/* In system mode we want L1_MAP to be based on ram offsets,
   while in user mode we want it to be based on virtual addresses.  */
#if !defined(CONFIG_USER_ONLY)
#if HOST_LONG_BITS < TARGET_PHYS_ADDR_SPACE_BITS
# define L1_MAP_ADDR_SPACE_BITS  HOST_LONG_BITS
#else
# define L1_MAP_ADDR_SPACE_BITS  TARGET_PHYS_ADDR_SPACE_BITS
#endif
#else
# define L1_MAP_ADDR_SPACE_BITS  TARGET_VIRT_ADDR_SPACE_BITS
#endif

/* Size of the L2 (and L3, etc) page tables.  */
#define L2_BITS 10
#define L2_SIZE (1 << L2_BITS)

/* The bits remaining after N lower levels of page tables.  */
#define P_L1_BITS_REM \
    ((TARGET_PHYS_ADDR_SPACE_BITS - TARGET_PAGE_BITS) % L2_BITS)
#define V_L1_BITS_REM \
    ((L1_MAP_ADDR_SPACE_BITS - TARGET_PAGE_BITS) % L2_BITS)

/* Size of the L1 page table.  Avoid silly small sizes.  */
#if P_L1_BITS_REM < 4
#define P_L1_BITS  (P_L1_BITS_REM + L2_BITS)
#else
#define P_L1_BITS  P_L1_BITS_REM
#endif

#if V_L1_BITS_REM < 4
#define V_L1_BITS  (V_L1_BITS_REM + L2_BITS)
#else
#define V_L1_BITS  V_L1_BITS_REM
#endif

#define P_L1_SIZE  ((target_phys_addr_t)1 << P_L1_BITS)
#define V_L1_SIZE  ((target_ulong)1 << V_L1_BITS)

#define P_L1_SHIFT (TARGET_PHYS_ADDR_SPACE_BITS - TARGET_PAGE_BITS - P_L1_BITS)
#define V_L1_SHIFT (L1_MAP_ADDR_SPACE_BITS - TARGET_PAGE_BITS - V_L1_BITS)

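/* Worked example (illustration only): with L1_MAP_ADDR_SPACE_BITS == 32
   and TARGET_PAGE_BITS == 12, the page number occupies 20 bits, and
   20 % L2_BITS == 0 is below the minimum of 4, so V_L1_BITS becomes
   0 + L2_BITS == 10.  The walk is then a 1024-entry L1 table
   (V_L1_SHIFT == 10) over a single 1024-entry L2 level of PageDesc.
   With a 36-bit physical space, 24 % 10 == 4 gives P_L1_BITS == 4:
   a 16-entry L1 table over two 10-bit lower levels. */
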
unsigned long qemu_real_host_page_size;
unsigned long qemu_host_page_bits;
unsigned long qemu_host_page_size;
unsigned long qemu_host_page_mask;

/* This is a multi-level map on the virtual address space.
   The bottom level has pointers to PageDesc.  */
static void *l1_map[V_L1_SIZE];

#if !defined(CONFIG_USER_ONLY)
typedef struct PhysPageDesc {
    /* offset in host memory of the page + io_index in the low bits */
    ram_addr_t phys_offset;
    ram_addr_t region_offset;
} PhysPageDesc;

/* This is a multi-level map on the physical address space.
   The bottom level has pointers to PhysPageDesc.  */
static void *l1_phys_map[P_L1_SIZE];

static void io_mem_init(void);

/* io memory support */
CPUWriteMemoryFunc *io_mem_write[IO_MEM_NB_ENTRIES][4];
CPUReadMemoryFunc *io_mem_read[IO_MEM_NB_ENTRIES][4];
void *io_mem_opaque[IO_MEM_NB_ENTRIES];
static char io_mem_used[IO_MEM_NB_ENTRIES];
static int io_mem_watch;
#endif

/* log support */
#ifdef WIN32
static const char *logfilename = "qemu.log";
#else
static const char *logfilename = "/tmp/qemu.log";
#endif
FILE *logfile;
int loglevel;
static int log_append = 0;

/* statistics */
#if !defined(CONFIG_USER_ONLY)
static int tlb_flush_count;
#endif
static int tb_flush_count;
static int tb_phys_invalidate_count;

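/* Make a host memory range executable.  On POSIX hosts mprotect()
   requires page-aligned arguments, so the range below is widened to
   host page boundaries before the protection is changed. */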
#ifdef _WIN32
static void map_exec(void *addr, long size)
{
    DWORD old_protect;
    VirtualProtect(addr, size,
                   PAGE_EXECUTE_READWRITE, &old_protect);
}
#else
static void map_exec(void *addr, long size)
{
    unsigned long start, end, page_size;

    page_size = getpagesize();
    start = (unsigned long)addr;
    start &= ~(page_size - 1);

    end = (unsigned long)addr + size;
    end += page_size - 1;
    end &= ~(page_size - 1);

    mprotect((void *)start, end - start,
             PROT_READ | PROT_WRITE | PROT_EXEC);
}
#endif

static void page_init(void)
{
    /* NOTE: we can always suppose that qemu_host_page_size >=
       TARGET_PAGE_SIZE */
#ifdef _WIN32
    {
        SYSTEM_INFO system_info;

        GetSystemInfo(&system_info);
        qemu_real_host_page_size = system_info.dwPageSize;
    }
#else
    qemu_real_host_page_size = getpagesize();
#endif
    if (qemu_host_page_size == 0)
        qemu_host_page_size = qemu_real_host_page_size;
    if (qemu_host_page_size < TARGET_PAGE_SIZE)
        qemu_host_page_size = TARGET_PAGE_SIZE;
    qemu_host_page_bits = 0;
    while ((1 << qemu_host_page_bits) < qemu_host_page_size)
        qemu_host_page_bits++;
    qemu_host_page_mask = ~(qemu_host_page_size - 1);

#if defined(CONFIG_BSD) && defined(CONFIG_USER_ONLY)
    {
#ifdef HAVE_KINFO_GETVMMAP
        struct kinfo_vmentry *freep;
        int i, cnt;

        freep = kinfo_getvmmap(getpid(), &cnt);
        if (freep) {
            mmap_lock();
            for (i = 0; i < cnt; i++) {
                unsigned long startaddr, endaddr;

                startaddr = freep[i].kve_start;
                endaddr = freep[i].kve_end;
                if (h2g_valid(startaddr)) {
                    startaddr = h2g(startaddr) & TARGET_PAGE_MASK;

                    if (h2g_valid(endaddr)) {
                        endaddr = h2g(endaddr);
                        page_set_flags(startaddr, endaddr, PAGE_RESERVED);
                    } else {
#if TARGET_ABI_BITS <= L1_MAP_ADDR_SPACE_BITS
                        endaddr = ~0ul;
                        page_set_flags(startaddr, endaddr, PAGE_RESERVED);
#endif
                    }
                }
            }
            free(freep);
            mmap_unlock();
        }
#else
        FILE *f;

        last_brk = (unsigned long)sbrk(0);

        f = fopen("/compat/linux/proc/self/maps", "r");
        if (f) {
            mmap_lock();

            do {
                unsigned long startaddr, endaddr;
                int n;

                n = fscanf (f, "%lx-%lx %*[^\n]\n", &startaddr, &endaddr);

                if (n == 2 && h2g_valid(startaddr)) {
                    startaddr = h2g(startaddr) & TARGET_PAGE_MASK;

                    if (h2g_valid(endaddr)) {
                        endaddr = h2g(endaddr);
                    } else {
                        endaddr = ~0ul;
                    }
                    page_set_flags(startaddr, endaddr, PAGE_RESERVED);
                }
            } while (!feof(f));

            fclose(f);
            mmap_unlock();
        }
#endif
    }
#endif
}

static PageDesc *page_find_alloc(tb_page_addr_t index, int alloc)
{
    PageDesc *pd;
    void **lp;
    int i;

#if defined(CONFIG_USER_ONLY)
    /* We can't use qemu_malloc because it may recurse into a locked mutex. */
# define ALLOC(P, SIZE)                                 \
    do {                                                \
        P = mmap(NULL, SIZE, PROT_READ | PROT_WRITE,    \
                 MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);   \
    } while (0)
#else
# define ALLOC(P, SIZE) \
    do { P = qemu_mallocz(SIZE); } while (0)
#endif

    /* Level 1.  Always allocated.  */
    lp = l1_map + ((index >> V_L1_SHIFT) & (V_L1_SIZE - 1));

    /* Level 2..N-1.  */
    for (i = V_L1_SHIFT / L2_BITS - 1; i > 0; i--) {
        void **p = *lp;

        if (p == NULL) {
            if (!alloc) {
                return NULL;
            }
            ALLOC(p, sizeof(void *) * L2_SIZE);
            *lp = p;
        }

        lp = p + ((index >> (i * L2_BITS)) & (L2_SIZE - 1));
    }

    pd = *lp;
    if (pd == NULL) {
        if (!alloc) {
            return NULL;
        }
        ALLOC(pd, sizeof(PageDesc) * L2_SIZE);
        *lp = pd;
    }

#undef ALLOC

    return pd + (index & (L2_SIZE - 1));
}

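/* Note on the walk above: with alloc == 0 it is a pure lookup that
   returns NULL as soon as an intermediate level is missing; with
   alloc != 0 missing levels are allocated on the way down.  The
   result is the PageDesc slot for the given target page index. */
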
static inline PageDesc *page_find(tb_page_addr_t index)
{
    return page_find_alloc(index, 0);
}

#if !defined(CONFIG_USER_ONLY)
static PhysPageDesc *phys_page_find_alloc(target_phys_addr_t index, int alloc)
{
    PhysPageDesc *pd;
    void **lp;
    int i;

    /* Level 1.  Always allocated.  */
    lp = l1_phys_map + ((index >> P_L1_SHIFT) & (P_L1_SIZE - 1));

    /* Level 2..N-1.  */
    for (i = P_L1_SHIFT / L2_BITS - 1; i > 0; i--) {
        void **p = *lp;
        if (p == NULL) {
            if (!alloc) {
                return NULL;
            }
            *lp = p = qemu_mallocz(sizeof(void *) * L2_SIZE);
        }
        lp = p + ((index >> (i * L2_BITS)) & (L2_SIZE - 1));
    }

    pd = *lp;
    if (pd == NULL) {
        int i;

        if (!alloc) {
            return NULL;
        }

        *lp = pd = qemu_malloc(sizeof(PhysPageDesc) * L2_SIZE);

        for (i = 0; i < L2_SIZE; i++) {
            pd[i].phys_offset = IO_MEM_UNASSIGNED;
            pd[i].region_offset = (index + i) << TARGET_PAGE_BITS;
        }
    }

    return pd + (index & (L2_SIZE - 1));
}

static inline PhysPageDesc *phys_page_find(target_phys_addr_t index)
{
    return phys_page_find_alloc(index, 0);
}

static void tlb_protect_code(ram_addr_t ram_addr);
static void tlb_unprotect_code_phys(CPUState *env, ram_addr_t ram_addr,
                                    target_ulong vaddr);
#define mmap_lock() do { } while(0)
#define mmap_unlock() do { } while(0)
#endif

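/* In the system (softmmu) build there is no guest mmap()/munmap() to
   race against, so mmap_lock()/mmap_unlock() above compile to no-ops;
   the user-mode build provides real locking instead. */
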
#define DEFAULT_CODE_GEN_BUFFER_SIZE (32 * 1024 * 1024)

#if defined(CONFIG_USER_ONLY)
/* Currently it is not recommended to allocate big chunks of data in
   user mode. This will change when a dedicated libc is used. */
#define USE_STATIC_CODE_GEN_BUFFER
#endif

#ifdef USE_STATIC_CODE_GEN_BUFFER
static uint8_t static_code_gen_buffer[DEFAULT_CODE_GEN_BUFFER_SIZE]
               __attribute__((aligned (CODE_GEN_ALIGN)));
#endif

static void code_gen_alloc(unsigned long tb_size)
{
#ifdef USE_STATIC_CODE_GEN_BUFFER
    code_gen_buffer = static_code_gen_buffer;
    code_gen_buffer_size = DEFAULT_CODE_GEN_BUFFER_SIZE;
    map_exec(code_gen_buffer, code_gen_buffer_size);
#else
    code_gen_buffer_size = tb_size;
    if (code_gen_buffer_size == 0) {
#if defined(CONFIG_USER_ONLY)
        /* in user mode, phys_ram_size is not meaningful */
        code_gen_buffer_size = DEFAULT_CODE_GEN_BUFFER_SIZE;
#else
        /* XXX: needs adjustments */
        code_gen_buffer_size = (unsigned long)(ram_size / 4);
#endif
    }
    if (code_gen_buffer_size < MIN_CODE_GEN_BUFFER_SIZE)
        code_gen_buffer_size = MIN_CODE_GEN_BUFFER_SIZE;
    /* The code gen buffer location may have constraints depending on
       the host cpu and OS */
#if defined(__linux__)
    {
        int flags;
        void *start = NULL;

        flags = MAP_PRIVATE | MAP_ANONYMOUS;
#if defined(__x86_64__)
        flags |= MAP_32BIT;
        /* Cannot map more than that */
        if (code_gen_buffer_size > (800 * 1024 * 1024))
            code_gen_buffer_size = (800 * 1024 * 1024);
#elif defined(__sparc_v9__)
        // Map the buffer below 2G, so we can use direct calls and branches
        flags |= MAP_FIXED;
        start = (void *) 0x60000000UL;
        if (code_gen_buffer_size > (512 * 1024 * 1024))
            code_gen_buffer_size = (512 * 1024 * 1024);
#elif defined(__arm__)
        /* Map the buffer below 32M, so we can use direct calls and branches */
        flags |= MAP_FIXED;
        start = (void *) 0x01000000UL;
        if (code_gen_buffer_size > 16 * 1024 * 1024)
            code_gen_buffer_size = 16 * 1024 * 1024;
#elif defined(__s390x__)
        /* Map the buffer so that we can use direct calls and branches.  */
        /* We have a +- 4GB range on the branches; leave some slop.  */
        if (code_gen_buffer_size > (3ul * 1024 * 1024 * 1024)) {
            code_gen_buffer_size = 3ul * 1024 * 1024 * 1024;
        }
        start = (void *)0x90000000UL;
#endif
        code_gen_buffer = mmap(start, code_gen_buffer_size,
                               PROT_WRITE | PROT_READ | PROT_EXEC,
                               flags, -1, 0);
        if (code_gen_buffer == MAP_FAILED) {
            fprintf(stderr, "Could not allocate dynamic translator buffer\n");
            exit(1);
        }
    }
#elif defined(__FreeBSD__) || defined(__FreeBSD_kernel__) \
    || defined(__DragonFly__) || defined(__OpenBSD__)
    {
        int flags;
        void *addr = NULL;
        flags = MAP_PRIVATE | MAP_ANONYMOUS;
#if defined(__x86_64__)
        /* FreeBSD doesn't have MAP_32BIT, use MAP_FIXED and assume
         * 0x40000000 is free */
        flags |= MAP_FIXED;
        addr = (void *)0x40000000;
        /* Cannot map more than that */
        if (code_gen_buffer_size > (800 * 1024 * 1024))
            code_gen_buffer_size = (800 * 1024 * 1024);
#elif defined(__sparc_v9__)
        // Map the buffer below 2G, so we can use direct calls and branches
        flags |= MAP_FIXED;
        addr = (void *) 0x60000000UL;
        if (code_gen_buffer_size > (512 * 1024 * 1024)) {
            code_gen_buffer_size = (512 * 1024 * 1024);
        }
#endif
        code_gen_buffer = mmap(addr, code_gen_buffer_size,
                               PROT_WRITE | PROT_READ | PROT_EXEC,
                               flags, -1, 0);
        if (code_gen_buffer == MAP_FAILED) {
            fprintf(stderr, "Could not allocate dynamic translator buffer\n");
            exit(1);
        }
    }
#else
    code_gen_buffer = qemu_malloc(code_gen_buffer_size);
    map_exec(code_gen_buffer, code_gen_buffer_size);
#endif
#endif /* !USE_STATIC_CODE_GEN_BUFFER */
    map_exec(code_gen_prologue, sizeof(code_gen_prologue));
    code_gen_buffer_max_size = code_gen_buffer_size -
        (TCG_MAX_OP_SIZE * OPC_BUF_SIZE);
    code_gen_max_blocks = code_gen_buffer_size / CODE_GEN_AVG_BLOCK_SIZE;
    tbs = qemu_malloc(code_gen_max_blocks * sizeof(TranslationBlock));
}

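/* Note: code_gen_buffer_max_size leaves TCG_MAX_OP_SIZE * OPC_BUF_SIZE
   bytes of slack below the end of the buffer, so a translation that is
   already in progress cannot overrun it; tb_alloc() below checks
   code_gen_ptr against this threshold before handing out a new TB. */
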
/* Must be called before using the QEMU cpus. 'tb_size' is the size
   (in bytes) allocated to the translation buffer. Zero means default
   size. */
void cpu_exec_init_all(unsigned long tb_size)
{
    cpu_gen_init();
    code_gen_alloc(tb_size);
    code_gen_ptr = code_gen_buffer;
    page_init();
#if !defined(CONFIG_USER_ONLY)
    io_mem_init();
#endif
#if !defined(CONFIG_USER_ONLY) || !defined(CONFIG_USE_GUEST_BASE)
    /* There's no guest base to take into account, so go ahead and
       initialize the prologue now.  */
    tcg_prologue_init(&tcg_ctx);
#endif
}

#if defined(CPU_SAVE_VERSION) && !defined(CONFIG_USER_ONLY)

static int cpu_common_post_load(void *opaque, int version_id)
{
    CPUState *env = opaque;

    /* 0x01 was CPU_INTERRUPT_EXIT. This line can be removed when the
       version_id is increased. */
    env->interrupt_request &= ~0x01;
    tlb_flush(env, 1);

    return 0;
}

static const VMStateDescription vmstate_cpu_common = {
    .name = "cpu_common",
    .version_id = 1,
    .minimum_version_id = 1,
    .minimum_version_id_old = 1,
    .post_load = cpu_common_post_load,
    .fields      = (VMStateField []) {
        VMSTATE_UINT32(halted, CPUState),
        VMSTATE_UINT32(interrupt_request, CPUState),
        VMSTATE_END_OF_LIST()
    }
};
#endif

CPUState *qemu_get_cpu(int cpu)
{
    CPUState *env = first_cpu;

    while (env) {
        if (env->cpu_index == cpu)
            break;
        env = env->next_cpu;
    }

    return env;
}

void cpu_exec_init(CPUState *env)
{
    CPUState **penv;
    int cpu_index;

#if defined(CONFIG_USER_ONLY)
    cpu_list_lock();
#endif
    env->next_cpu = NULL;
    penv = &first_cpu;
    cpu_index = 0;
    while (*penv != NULL) {
        penv = &(*penv)->next_cpu;
        cpu_index++;
    }
    env->cpu_index = cpu_index;
    env->numa_node = 0;
    QTAILQ_INIT(&env->breakpoints);
    QTAILQ_INIT(&env->watchpoints);
#ifndef CONFIG_USER_ONLY
    env->thread_id = qemu_get_thread_id();
#endif
    *penv = env;
#if defined(CONFIG_USER_ONLY)
    cpu_list_unlock();
#endif
#if defined(CPU_SAVE_VERSION) && !defined(CONFIG_USER_ONLY)
    vmstate_register(NULL, cpu_index, &vmstate_cpu_common, env);
    register_savevm(NULL, "cpu", cpu_index, CPU_SAVE_VERSION,
                    cpu_save, cpu_load, env);
#endif
}

/* Allocate a new translation block. Flush the translation buffer if
   too many translation blocks or too much generated code. */
static TranslationBlock *tb_alloc(target_ulong pc)
{
    TranslationBlock *tb;

    if (nb_tbs >= code_gen_max_blocks ||
        (code_gen_ptr - code_gen_buffer) >= code_gen_buffer_max_size)
        return NULL;
    tb = &tbs[nb_tbs++];
    tb->pc = pc;
    tb->cflags = 0;
    return tb;
}

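/* Usage note: a NULL return from tb_alloc() signals that either the TB
   array or the code buffer is exhausted.  Callers such as tb_gen_code()
   react by calling tb_flush() and retrying, which is why the retry
   there "cannot fail". */
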
void tb_free(TranslationBlock *tb)
{
    /* In practice this is mostly used for single use temporary TBs.
       Ignore the hard cases and just back up if this TB happens to
       be the last one generated.  */
    if (nb_tbs > 0 && tb == &tbs[nb_tbs - 1]) {
        code_gen_ptr = tb->tc_ptr;
        nb_tbs--;
    }
}

static inline void invalidate_page_bitmap(PageDesc *p)
{
    if (p->code_bitmap) {
        qemu_free(p->code_bitmap);
        p->code_bitmap = NULL;
    }
    p->code_write_count = 0;
}

/* Set to NULL all the 'first_tb' fields in all PageDescs. */

static void page_flush_tb_1 (int level, void **lp)
{
    int i;

    if (*lp == NULL) {
        return;
    }
    if (level == 0) {
        PageDesc *pd = *lp;
        for (i = 0; i < L2_SIZE; ++i) {
            pd[i].first_tb = NULL;
            invalidate_page_bitmap(pd + i);
        }
    } else {
        void **pp = *lp;
        for (i = 0; i < L2_SIZE; ++i) {
            page_flush_tb_1 (level - 1, pp + i);
        }
    }
}

static void page_flush_tb(void)
{
    int i;
    for (i = 0; i < V_L1_SIZE; i++) {
        page_flush_tb_1(V_L1_SHIFT / L2_BITS - 1, l1_map + i);
    }
}

/* flush all the translation blocks */
/* XXX: tb_flush is currently not thread safe */
void tb_flush(CPUState *env1)
{
    CPUState *env;
#if defined(DEBUG_FLUSH)
    printf("qemu: flush code_size=%ld nb_tbs=%d avg_tb_size=%ld\n",
           (unsigned long)(code_gen_ptr - code_gen_buffer),
           nb_tbs, nb_tbs > 0 ?
           ((unsigned long)(code_gen_ptr - code_gen_buffer)) / nb_tbs : 0);
#endif
    if ((unsigned long)(code_gen_ptr - code_gen_buffer) > code_gen_buffer_size)
        cpu_abort(env1, "Internal error: code buffer overflow\n");

    nb_tbs = 0;

    for(env = first_cpu; env != NULL; env = env->next_cpu) {
        memset (env->tb_jmp_cache, 0, TB_JMP_CACHE_SIZE * sizeof (void *));
    }

    memset (tb_phys_hash, 0, CODE_GEN_PHYS_HASH_SIZE * sizeof (void *));
    page_flush_tb();

    code_gen_ptr = code_gen_buffer;
    /* XXX: flush processor icache at this point if cache flush is
       expensive */
    tb_flush_count++;
}

#ifdef DEBUG_TB_CHECK

static void tb_invalidate_check(target_ulong address)
{
    TranslationBlock *tb;
    int i;
    address &= TARGET_PAGE_MASK;
    for(i = 0;i < CODE_GEN_PHYS_HASH_SIZE; i++) {
        for(tb = tb_phys_hash[i]; tb != NULL; tb = tb->phys_hash_next) {
            if (!(address + TARGET_PAGE_SIZE <= tb->pc ||
                  address >= tb->pc + tb->size)) {
                printf("ERROR invalidate: address=" TARGET_FMT_lx
                       " PC=%08lx size=%04x\n",
                       address, (long)tb->pc, tb->size);
            }
        }
    }
}

/* verify that all the pages have correct rights for code */
static void tb_page_check(void)
{
    TranslationBlock *tb;
    int i, flags1, flags2;

    for(i = 0;i < CODE_GEN_PHYS_HASH_SIZE; i++) {
        for(tb = tb_phys_hash[i]; tb != NULL; tb = tb->phys_hash_next) {
            flags1 = page_get_flags(tb->pc);
            flags2 = page_get_flags(tb->pc + tb->size - 1);
            if ((flags1 & PAGE_WRITE) || (flags2 & PAGE_WRITE)) {
                printf("ERROR page flags: PC=%08lx size=%04x f1=%x f2=%x\n",
                       (long)tb->pc, tb->size, flags1, flags2);
            }
        }
    }
}

#endif

/* invalidate one TB */
static inline void tb_remove(TranslationBlock **ptb, TranslationBlock *tb,
                             int next_offset)
{
    TranslationBlock *tb1;
    for(;;) {
        tb1 = *ptb;
        if (tb1 == tb) {
            *ptb = *(TranslationBlock **)((char *)tb1 + next_offset);
            break;
        }
        ptb = (TranslationBlock **)((char *)tb1 + next_offset);
    }
}

static inline void tb_page_remove(TranslationBlock **ptb, TranslationBlock *tb)
{
    TranslationBlock *tb1;
    unsigned int n1;

    for(;;) {
        tb1 = *ptb;
        n1 = (long)tb1 & 3;
        tb1 = (TranslationBlock *)((long)tb1 & ~3);
        if (tb1 == tb) {
            *ptb = tb1->page_next[n1];
            break;
        }
        ptb = &tb1->page_next[n1];
    }
}

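/* The page and jump lists tag the low two bits of each TranslationBlock
   pointer: in the page lists the tag selects which of the TB's (at most
   two) pages the link belongs to, and in the circular jump list a tag
   of 2 marks the list head (the owning TB itself).  The tag must be
   masked off with ~3 before dereferencing, as above and below. */
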
static inline void tb_jmp_remove(TranslationBlock *tb, int n)
{
    TranslationBlock *tb1, **ptb;
    unsigned int n1;

    ptb = &tb->jmp_next[n];
    tb1 = *ptb;
    if (tb1) {
        /* find tb(n) in circular list */
        for(;;) {
            tb1 = *ptb;
            n1 = (long)tb1 & 3;
            tb1 = (TranslationBlock *)((long)tb1 & ~3);
            if (n1 == n && tb1 == tb)
                break;
            if (n1 == 2) {
                ptb = &tb1->jmp_first;
            } else {
                ptb = &tb1->jmp_next[n1];
            }
        }
        /* now we can remove tb(n) from the list */
        *ptb = tb->jmp_next[n];

        tb->jmp_next[n] = NULL;
    }
}

/* reset the jump entry 'n' of a TB so that it is not chained to
   another TB */
static inline void tb_reset_jump(TranslationBlock *tb, int n)
{
    tb_set_jmp_target(tb, n, (unsigned long)(tb->tc_ptr + tb->tb_next_offset[n]));
}

void tb_phys_invalidate(TranslationBlock *tb, tb_page_addr_t page_addr)
{
    CPUState *env;
    PageDesc *p;
    unsigned int h, n1;
    tb_page_addr_t phys_pc;
    TranslationBlock *tb1, *tb2;

    /* remove the TB from the hash list */
    phys_pc = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
    h = tb_phys_hash_func(phys_pc);
    tb_remove(&tb_phys_hash[h], tb,
              offsetof(TranslationBlock, phys_hash_next));

    /* remove the TB from the page list */
    if (tb->page_addr[0] != page_addr) {
        p = page_find(tb->page_addr[0] >> TARGET_PAGE_BITS);
        tb_page_remove(&p->first_tb, tb);
        invalidate_page_bitmap(p);
    }
    if (tb->page_addr[1] != -1 && tb->page_addr[1] != page_addr) {
        p = page_find(tb->page_addr[1] >> TARGET_PAGE_BITS);
        tb_page_remove(&p->first_tb, tb);
        invalidate_page_bitmap(p);
    }

    tb_invalidated_flag = 1;

    /* remove the TB from the per-CPU jump cache */
    h = tb_jmp_cache_hash_func(tb->pc);
    for(env = first_cpu; env != NULL; env = env->next_cpu) {
        if (env->tb_jmp_cache[h] == tb)
            env->tb_jmp_cache[h] = NULL;
    }

    /* remove this TB from the two jump lists */
    tb_jmp_remove(tb, 0);
    tb_jmp_remove(tb, 1);

    /* unchain any remaining jumps to this TB */
    tb1 = tb->jmp_first;
    for(;;) {
        n1 = (long)tb1 & 3;
        if (n1 == 2)
            break;
        tb1 = (TranslationBlock *)((long)tb1 & ~3);
        tb2 = tb1->jmp_next[n1];
        tb_reset_jump(tb1, n1);
        tb1->jmp_next[n1] = NULL;
        tb1 = tb2;
    }
    tb->jmp_first = (TranslationBlock *)((long)tb | 2); /* fail safe */

    tb_phys_invalidate_count++;
}

static inline void set_bits(uint8_t *tab, int start, int len)
{
    int end, mask, end1;

    end = start + len;
    tab += start >> 3;
    mask = 0xff << (start & 7);
    if ((start & ~7) == (end & ~7)) {
        if (start < end) {
            mask &= ~(0xff << (end & 7));
            *tab |= mask;
        }
    } else {
        *tab++ |= mask;
        start = (start + 8) & ~7;
        end1 = end & ~7;
        while (start < end1) {
            *tab++ = 0xff;
            start += 8;
        }
        if (start < end) {
            mask = ~(0xff << (end & 7));
            *tab |= mask;
        }
    }
}

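/* Worked example (illustration only): set_bits(tab, 3, 7) marks bits
   3..9.  start=3 and end=10 straddle a byte boundary, so the first
   byte gets mask 0xff << 3 == 0xf8 (bits 3..7) and the trailing
   partial byte gets ~(0xff << 2) == 0x03 (bits 8..9). */
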
static void build_page_bitmap(PageDesc *p)
{
    int n, tb_start, tb_end;
    TranslationBlock *tb;

    p->code_bitmap = qemu_mallocz(TARGET_PAGE_SIZE / 8);

    tb = p->first_tb;
    while (tb != NULL) {
        n = (long)tb & 3;
        tb = (TranslationBlock *)((long)tb & ~3);
        /* NOTE: this is subtle as a TB may span two physical pages */
        if (n == 0) {
            /* NOTE: tb_end may be after the end of the page, but
               it is not a problem */
            tb_start = tb->pc & ~TARGET_PAGE_MASK;
            tb_end = tb_start + tb->size;
            if (tb_end > TARGET_PAGE_SIZE)
                tb_end = TARGET_PAGE_SIZE;
        } else {
            tb_start = 0;
            tb_end = ((tb->pc + tb->size) & ~TARGET_PAGE_MASK);
        }
        set_bits(p->code_bitmap, tb_start, tb_end - tb_start);
        tb = tb->page_next[n];
    }
}

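/* The bitmap allocated above holds one bit per byte of the page
   (TARGET_PAGE_SIZE / 8 bytes in total); a set bit means that byte is
   covered by translated code.  tb_invalidate_phys_page_fast() consults
   it so that guest writes which miss all code can skip the expensive
   invalidation path. */
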
TranslationBlock *tb_gen_code(CPUState *env,
                              target_ulong pc, target_ulong cs_base,
                              int flags, int cflags)
{
    TranslationBlock *tb;
    uint8_t *tc_ptr;
    tb_page_addr_t phys_pc, phys_page2;
    target_ulong virt_page2;
    int code_gen_size;

    phys_pc = get_page_addr_code(env, pc);
    tb = tb_alloc(pc);
    if (!tb) {
        /* flush must be done */
        tb_flush(env);
        /* cannot fail at this point */
        tb = tb_alloc(pc);
        /* Don't forget to invalidate previous TB info.  */
        tb_invalidated_flag = 1;
    }
    tc_ptr = code_gen_ptr;
    tb->tc_ptr = tc_ptr;
    tb->cs_base = cs_base;
    tb->flags = flags;
    tb->cflags = cflags;
    cpu_gen_code(env, tb, &code_gen_size);
    code_gen_ptr = (void *)(((unsigned long)code_gen_ptr + code_gen_size + CODE_GEN_ALIGN - 1) & ~(CODE_GEN_ALIGN - 1));

    /* check next page if needed */
    virt_page2 = (pc + tb->size - 1) & TARGET_PAGE_MASK;
    phys_page2 = -1;
    if ((pc & TARGET_PAGE_MASK) != virt_page2) {
        phys_page2 = get_page_addr_code(env, virt_page2);
    }
    tb_link_page(tb, phys_pc, phys_page2);
    return tb;
}

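/* Note: a TB may straddle a guest page boundary, which is why up to
   two physical pages (phys_pc and phys_page2) are computed above and
   passed to tb_link_page(): the TB is registered on both pages so
   that an invalidation of either one will find it. */
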
10079fa3e853Sbellard /* invalidate all TBs which intersect with the target physical page
10089fa3e853Sbellard    starting in range [start;end[. NOTE: start and end must refer to
1009d720b93dSbellard    the same physical page. 'is_cpu_write_access' should be true if called
1010d720b93dSbellard    from a real cpu write access: the virtual CPU will exit the current
1011d720b93dSbellard    TB if code is modified inside this TB. */
101241c1b1c9SPaul Brook void tb_invalidate_phys_page_range(tb_page_addr_t start, tb_page_addr_t end,
1013d720b93dSbellard                                    int is_cpu_write_access)
10149fa3e853Sbellard {
10156b917547Saliguori     TranslationBlock *tb, *tb_next, *saved_tb;
1016d720b93dSbellard     CPUState *env = cpu_single_env;
101741c1b1c9SPaul Brook     tb_page_addr_t tb_start, tb_end;
10186b917547Saliguori     PageDesc *p;
10196b917547Saliguori     int n;
10206b917547Saliguori #ifdef TARGET_HAS_PRECISE_SMC
10216b917547Saliguori     int current_tb_not_found = is_cpu_write_access;
10226b917547Saliguori     TranslationBlock *current_tb = NULL;
10236b917547Saliguori     int current_tb_modified = 0;
10246b917547Saliguori     target_ulong current_pc = 0;
10256b917547Saliguori     target_ulong current_cs_base = 0;
10266b917547Saliguori     int current_flags = 0;
10276b917547Saliguori #endif /* TARGET_HAS_PRECISE_SMC */
10289fa3e853Sbellard 
10299fa3e853Sbellard     p = page_find(start >> TARGET_PAGE_BITS);
10309fa3e853Sbellard     if (!p)
10319fa3e853Sbellard         return;
10329fa3e853Sbellard     if (!p->code_bitmap &&
1033d720b93dSbellard         ++p->code_write_count >= SMC_BITMAP_USE_THRESHOLD &&
1034d720b93dSbellard         is_cpu_write_access) {
10359fa3e853Sbellard         /* build code bitmap */
10369fa3e853Sbellard         build_page_bitmap(p);
10379fa3e853Sbellard     }
10389fa3e853Sbellard 
10399fa3e853Sbellard     /* we remove all the TBs in the range [start, end[ */
10409fa3e853Sbellard     /* XXX: see if in some cases it could be faster to invalidate all the code */
10419fa3e853Sbellard     tb = p->first_tb;
10429fa3e853Sbellard     while (tb != NULL) {
10439fa3e853Sbellard         n = (long)tb & 3;
10449fa3e853Sbellard         tb = (TranslationBlock *)((long)tb & ~3);
10459fa3e853Sbellard         tb_next = tb->page_next[n];
10469fa3e853Sbellard         /* NOTE: this is subtle as a TB may span two physical pages */
10479fa3e853Sbellard         if (n == 0) {
10489fa3e853Sbellard             /* NOTE: tb_end may be after the end of the page, but
10499fa3e853Sbellard                it is not a problem */
10509fa3e853Sbellard             tb_start = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
10519fa3e853Sbellard             tb_end = tb_start + tb->size;
10529fa3e853Sbellard         } else {
10539fa3e853Sbellard             tb_start = tb->page_addr[1];
10549fa3e853Sbellard             tb_end = tb_start + ((tb->pc + tb->size) & ~TARGET_PAGE_MASK);
10559fa3e853Sbellard         }
10569fa3e853Sbellard         if (!(tb_end <= start || tb_start >= end)) {
1057d720b93dSbellard #ifdef TARGET_HAS_PRECISE_SMC
1058d720b93dSbellard             if (current_tb_not_found) {
1059d720b93dSbellard                 current_tb_not_found = 0;
1060d720b93dSbellard                 current_tb = NULL;
10612e70f6efSpbrook                 if (env->mem_io_pc) {
1062d720b93dSbellard                     /* now we have a real cpu fault */
10632e70f6efSpbrook                     current_tb = tb_find_pc(env->mem_io_pc);
1064d720b93dSbellard                 }
1065d720b93dSbellard             }
1066d720b93dSbellard             if (current_tb == tb &&
10672e70f6efSpbrook                 (current_tb->cflags & CF_COUNT_MASK) != 1) {
1068d720b93dSbellard                 /* If we are modifying the current TB, we must stop
1069d720b93dSbellard                 its execution. We could be more precise by checking
1070d720b93dSbellard                 that the modification is after the current PC, but it
1071d720b93dSbellard                 would require a specialized function to partially
1072d720b93dSbellard                 restore the CPU state */
1073d720b93dSbellard 
1074d720b93dSbellard                 current_tb_modified = 1;
1075618ba8e6SStefan Weil                 cpu_restore_state(current_tb, env, env->mem_io_pc);
10766b917547Saliguori                 cpu_get_tb_cpu_state(env, &current_pc, &current_cs_base,
10776b917547Saliguori                                      &current_flags);
1078d720b93dSbellard             }
1079d720b93dSbellard #endif /* TARGET_HAS_PRECISE_SMC */
10806f5a9f7eSbellard             /* we need to do that to handle the case where a signal
10816f5a9f7eSbellard                occurs while doing tb_phys_invalidate() */
10826f5a9f7eSbellard             saved_tb = NULL;
10836f5a9f7eSbellard             if (env) {
1084ea1c1802Sbellard                 saved_tb = env->current_tb;
1085ea1c1802Sbellard                 env->current_tb = NULL;
10866f5a9f7eSbellard             }
10879fa3e853Sbellard             tb_phys_invalidate(tb, -1);
10886f5a9f7eSbellard             if (env) {
1089ea1c1802Sbellard                 env->current_tb = saved_tb;
1090ea1c1802Sbellard                 if (env->interrupt_request && env->current_tb)
1091ea1c1802Sbellard                     cpu_interrupt(env, env->interrupt_request);
10929fa3e853Sbellard             }
10936f5a9f7eSbellard         }
10949fa3e853Sbellard         tb = tb_next;
10959fa3e853Sbellard     }
10969fa3e853Sbellard #if !defined(CONFIG_USER_ONLY)
10979fa3e853Sbellard     /* if no code remains, there is no need to keep using slow writes */
10989fa3e853Sbellard     if (!p->first_tb) {
10999fa3e853Sbellard         invalidate_page_bitmap(p);
1100d720b93dSbellard         if (is_cpu_write_access) {
11012e70f6efSpbrook             tlb_unprotect_code_phys(env, start, env->mem_io_vaddr);
1102d720b93dSbellard         }
1103d720b93dSbellard     }
1104d720b93dSbellard #endif
1105d720b93dSbellard #ifdef TARGET_HAS_PRECISE_SMC
1106d720b93dSbellard     if (current_tb_modified) {
1107d720b93dSbellard         /* we generate a block containing just the instruction
1108d720b93dSbellard            modifying the memory. This ensures that the block cannot
1109d720b93dSbellard            modify itself */
1110ea1c1802Sbellard         env->current_tb = NULL;
11112e70f6efSpbrook         tb_gen_code(env, current_pc, current_cs_base, current_flags, 1);
1112d720b93dSbellard         cpu_resume_from_signal(env, NULL);
11139fa3e853Sbellard     }
11149fa3e853Sbellard #endif
11159fa3e853Sbellard }
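
/* Editor's sketch (not part of the original source): the loops above and
   below repeatedly decode tagged TB pointers.  TranslationBlock structures
   are at least 4-byte aligned, so the two low bits of each page_next link
   are free to encode which of the TB's (at most two) physical pages the
   link belongs to.  The hypothetical helper below just names that idiom. */
static inline TranslationBlock *tb_untag_page_link(TranslationBlock *tagged,
                                                   unsigned int *n)
{
    *n = (long)tagged & 3;                           /* page slot: 0 or 1 */
    return (TranslationBlock *)((long)tagged & ~3);  /* real pointer */
}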
11169fa3e853Sbellard 
11179fa3e853Sbellard /* len must be <= 8 and start must be a multiple of len */
111841c1b1c9SPaul Brook static inline void tb_invalidate_phys_page_fast(tb_page_addr_t start, int len)
11199fa3e853Sbellard {
11209fa3e853Sbellard     PageDesc *p;
11219fa3e853Sbellard     int offset, b;
112259817ccbSbellard #if 0
1123a4193c8aSbellard     if (1) {
112493fcfe39Saliguori         qemu_log("modifying code at 0x%x size=%d EIP=%x PC=%08x\n",
11252e70f6efSpbrook                   cpu_single_env->mem_io_vaddr, len,
1126a4193c8aSbellard                   cpu_single_env->eip,
1127a4193c8aSbellard                   cpu_single_env->eip + (long)cpu_single_env->segs[R_CS].base);
1128a4193c8aSbellard     }
112959817ccbSbellard #endif
11309fa3e853Sbellard     p = page_find(start >> TARGET_PAGE_BITS);
11319fa3e853Sbellard     if (!p)
11329fa3e853Sbellard         return;
11339fa3e853Sbellard     if (p->code_bitmap) {
11349fa3e853Sbellard         offset = start & ~TARGET_PAGE_MASK;
11359fa3e853Sbellard         b = p->code_bitmap[offset >> 3] >> (offset & 7);
11369fa3e853Sbellard         if (b & ((1 << len) - 1))
11379fa3e853Sbellard             goto do_invalidate;
11389fa3e853Sbellard     } else {
11399fa3e853Sbellard     do_invalidate:
1140d720b93dSbellard         tb_invalidate_phys_page_range(start, start + len, 1);
11419fa3e853Sbellard     }
11429fa3e853Sbellard }
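
/* Editor's sketch (assumption, not QEMU API): the code-bitmap test above,
   spelled out.  The bitmap holds one bit per byte of the page; because
   len <= 8 and start is a multiple of len, the len bits tested can never
   straddle a bitmap byte, so a single byte load suffices. */
static inline int code_bitmap_covers(const uint8_t *code_bitmap,
                                     int offset, int len)
{
    int b = code_bitmap[offset >> 3] >> (offset & 7);
    return (b & ((1 << len) - 1)) != 0;
}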
11439fa3e853Sbellard 
11449fa3e853Sbellard #if !defined(CONFIG_SOFTMMU)
114541c1b1c9SPaul Brook static void tb_invalidate_phys_page(tb_page_addr_t addr,
1146d720b93dSbellard                                     unsigned long pc, void *puc)
11479fa3e853Sbellard {
11486b917547Saliguori     TranslationBlock *tb;
11499fa3e853Sbellard     PageDesc *p;
11506b917547Saliguori     int n;
1151d720b93dSbellard #ifdef TARGET_HAS_PRECISE_SMC
11526b917547Saliguori     TranslationBlock *current_tb = NULL;
1153d720b93dSbellard     CPUState *env = cpu_single_env;
11546b917547Saliguori     int current_tb_modified = 0;
11556b917547Saliguori     target_ulong current_pc = 0;
11566b917547Saliguori     target_ulong current_cs_base = 0;
11576b917547Saliguori     int current_flags = 0;
1158d720b93dSbellard #endif
11599fa3e853Sbellard 
11609fa3e853Sbellard     addr &= TARGET_PAGE_MASK;
11619fa3e853Sbellard     p = page_find(addr >> TARGET_PAGE_BITS);
1162fd6ce8f6Sbellard     if (!p)
1163fd6ce8f6Sbellard         return;
1164fd6ce8f6Sbellard     tb = p->first_tb;
1165d720b93dSbellard #ifdef TARGET_HAS_PRECISE_SMC
1166d720b93dSbellard     if (tb && pc != 0) {
1167d720b93dSbellard         current_tb = tb_find_pc(pc);
1168d720b93dSbellard     }
1169d720b93dSbellard #endif
1170fd6ce8f6Sbellard     while (tb != NULL) {
11719fa3e853Sbellard         n = (long)tb & 3;
11729fa3e853Sbellard         tb = (TranslationBlock *)((long)tb & ~3);
1173d720b93dSbellard #ifdef TARGET_HAS_PRECISE_SMC
1174d720b93dSbellard         if (current_tb == tb &&
11752e70f6efSpbrook             (current_tb->cflags & CF_COUNT_MASK) != 1) {
1176d720b93dSbellard                 /* If we are modifying the current TB, we must stop
1177d720b93dSbellard                    its execution. We could be more precise by checking
1178d720b93dSbellard                    that the modification is after the current PC, but it
1179d720b93dSbellard                    would require a specialized function to partially
1180d720b93dSbellard                    restore the CPU state */
1181d720b93dSbellard 
1182d720b93dSbellard             current_tb_modified = 1;
1183618ba8e6SStefan Weil             cpu_restore_state(current_tb, env, pc);
11846b917547Saliguori             cpu_get_tb_cpu_state(env, &current_pc, &current_cs_base,
11856b917547Saliguori                                  &current_flags);
1186d720b93dSbellard         }
1187d720b93dSbellard #endif /* TARGET_HAS_PRECISE_SMC */
11889fa3e853Sbellard         tb_phys_invalidate(tb, addr);
11899fa3e853Sbellard         tb = tb->page_next[n];
1190fd6ce8f6Sbellard     }
1191fd6ce8f6Sbellard     p->first_tb = NULL;
1192d720b93dSbellard #ifdef TARGET_HAS_PRECISE_SMC
1193d720b93dSbellard     if (current_tb_modified) {
1194d720b93dSbellard         /* we generate a block containing just the instruction
1195d720b93dSbellard            modifying the memory. This ensures that the block cannot
1196d720b93dSbellard            modify itself */
1197ea1c1802Sbellard         env->current_tb = NULL;
11982e70f6efSpbrook         tb_gen_code(env, current_pc, current_cs_base, current_flags, 1);
1199d720b93dSbellard         cpu_resume_from_signal(env, puc);
1200d720b93dSbellard     }
1201d720b93dSbellard #endif
1202fd6ce8f6Sbellard }
12039fa3e853Sbellard #endif
1204fd6ce8f6Sbellard 
1205fd6ce8f6Sbellard /* add the tb in the target page and protect it if necessary */
12069fa3e853Sbellard static inline void tb_alloc_page(TranslationBlock *tb,
120741c1b1c9SPaul Brook                                  unsigned int n, tb_page_addr_t page_addr)
1208fd6ce8f6Sbellard {
1209fd6ce8f6Sbellard     PageDesc *p;
12104429ab44SJuan Quintela #ifndef CONFIG_USER_ONLY
12114429ab44SJuan Quintela     bool page_already_protected;
12124429ab44SJuan Quintela #endif
12139fa3e853Sbellard 
12149fa3e853Sbellard     tb->page_addr[n] = page_addr;
12155cd2c5b6SRichard Henderson     p = page_find_alloc(page_addr >> TARGET_PAGE_BITS, 1);
12169fa3e853Sbellard     tb->page_next[n] = p->first_tb;
12174429ab44SJuan Quintela #ifndef CONFIG_USER_ONLY
12184429ab44SJuan Quintela     page_already_protected = p->first_tb != NULL;
12194429ab44SJuan Quintela #endif
12209fa3e853Sbellard     p->first_tb = (TranslationBlock *)((long)tb | n);
12219fa3e853Sbellard     invalidate_page_bitmap(p);
12229fa3e853Sbellard 
1223107db443Sbellard #if defined(TARGET_HAS_SMC) || 1
1224d720b93dSbellard 
12259fa3e853Sbellard #if defined(CONFIG_USER_ONLY)
12269fa3e853Sbellard     if (p->flags & PAGE_WRITE) {
122753a5960aSpbrook         target_ulong addr;
122853a5960aSpbrook         PageDesc *p2;
1229fd6ce8f6Sbellard         int prot;
1230fd6ce8f6Sbellard 
1231fd6ce8f6Sbellard         /* force the host page to be non-writable (writes will incur a
1232fd6ce8f6Sbellard            page fault + mprotect overhead) */
123353a5960aSpbrook         page_addr &= qemu_host_page_mask;
1234fd6ce8f6Sbellard         prot = 0;
123553a5960aSpbrook         for(addr = page_addr; addr < page_addr + qemu_host_page_size;
123653a5960aSpbrook             addr += TARGET_PAGE_SIZE) {
123753a5960aSpbrook 
123853a5960aSpbrook             p2 = page_find (addr >> TARGET_PAGE_BITS);
123953a5960aSpbrook             if (!p2)
124053a5960aSpbrook                 continue;
124153a5960aSpbrook             prot |= p2->flags;
124253a5960aSpbrook             p2->flags &= ~PAGE_WRITE;
124353a5960aSpbrook         }
124453a5960aSpbrook         mprotect(g2h(page_addr), qemu_host_page_size,
1245fd6ce8f6Sbellard                  (prot & PAGE_BITS) & ~PAGE_WRITE);
1246fd6ce8f6Sbellard #ifdef DEBUG_TB_INVALIDATE
1247ab3d1727Sblueswir1         printf("protecting code page: 0x" TARGET_FMT_lx "\n",
124853a5960aSpbrook                page_addr);
1249fd6ce8f6Sbellard #endif
1250fd6ce8f6Sbellard     }
12519fa3e853Sbellard #else
12529fa3e853Sbellard     /* if some code is already present, then the pages are already
12539fa3e853Sbellard        protected. So we handle the case where only the first TB is
12549fa3e853Sbellard        allocated in a physical page */
12554429ab44SJuan Quintela     if (!page_already_protected) {
12566a00d601Sbellard         tlb_protect_code(page_addr);
12579fa3e853Sbellard     }
12589fa3e853Sbellard #endif
1259d720b93dSbellard 
1260d720b93dSbellard #endif /* TARGET_HAS_SMC */
1261fd6ce8f6Sbellard }
1262fd6ce8f6Sbellard 
12639fa3e853Sbellard /* add a new TB and link it to the physical page tables. phys_page2 is
12649fa3e853Sbellard    (-1) to indicate that only one page contains the TB. */
126541c1b1c9SPaul Brook void tb_link_page(TranslationBlock *tb,
126641c1b1c9SPaul Brook                   tb_page_addr_t phys_pc, tb_page_addr_t phys_page2)
1267d4e8164fSbellard {
12689fa3e853Sbellard     unsigned int h;
12699fa3e853Sbellard     TranslationBlock **ptb;
12709fa3e853Sbellard 
1271c8a706feSpbrook     /* Grab the mmap lock to stop another thread invalidating this TB
1272c8a706feSpbrook        before we are done.  */
1273c8a706feSpbrook     mmap_lock();
12749fa3e853Sbellard     /* add in the physical hash table */
12759fa3e853Sbellard     h = tb_phys_hash_func(phys_pc);
12769fa3e853Sbellard     ptb = &tb_phys_hash[h];
12779fa3e853Sbellard     tb->phys_hash_next = *ptb;
12789fa3e853Sbellard     *ptb = tb;
1279fd6ce8f6Sbellard 
1280fd6ce8f6Sbellard     /* add in the page list */
12819fa3e853Sbellard     tb_alloc_page(tb, 0, phys_pc & TARGET_PAGE_MASK);
12829fa3e853Sbellard     if (phys_page2 != -1)
12839fa3e853Sbellard         tb_alloc_page(tb, 1, phys_page2);
12849fa3e853Sbellard     else
12859fa3e853Sbellard         tb->page_addr[1] = -1;
12869fa3e853Sbellard 
1287d4e8164fSbellard     tb->jmp_first = (TranslationBlock *)((long)tb | 2);
1288d4e8164fSbellard     tb->jmp_next[0] = NULL;
1289d4e8164fSbellard     tb->jmp_next[1] = NULL;
1290d4e8164fSbellard 
1291d4e8164fSbellard     /* init original jump addresses */
1292d4e8164fSbellard     if (tb->tb_next_offset[0] != 0xffff)
1293d4e8164fSbellard         tb_reset_jump(tb, 0);
1294d4e8164fSbellard     if (tb->tb_next_offset[1] != 0xffff)
1295d4e8164fSbellard         tb_reset_jump(tb, 1);
12968a40a180Sbellard 
12978a40a180Sbellard #ifdef DEBUG_TB_CHECK
12988a40a180Sbellard     tb_page_check();
12998a40a180Sbellard #endif
1300c8a706feSpbrook     mmap_unlock();
1301fd6ce8f6Sbellard }
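
/* Editor's sketch of a typical caller (it mirrors what tb_gen_code does;
   shown here only for illustration): a TB whose code crosses a page
   boundary is linked under both physical pages, otherwise phys_page2
   stays -1. */
static void tb_link_page_example(CPUState *env, TranslationBlock *tb,
                                 target_ulong pc, tb_page_addr_t phys_pc)
{
    tb_page_addr_t phys_page2 = -1;
    target_ulong virt_page2 = (pc + tb->size - 1) & TARGET_PAGE_MASK;

    if ((pc & TARGET_PAGE_MASK) != virt_page2) {
        phys_page2 = get_page_addr_code(env, virt_page2);
    }
    tb_link_page(tb, phys_pc & TARGET_PAGE_MASK, phys_page2);
}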
1302fd6ce8f6Sbellard 
1303a513fe19Sbellard /* find the TB 'tb' such that tb->tc_ptr <= tc_ptr <
1304a513fe19Sbellard    (tb + 1)->tc_ptr. Return NULL if not found */
1305a513fe19Sbellard TranslationBlock *tb_find_pc(unsigned long tc_ptr)
1306a513fe19Sbellard {
1307a513fe19Sbellard     int m_min, m_max, m;
1308a513fe19Sbellard     unsigned long v;
1309a513fe19Sbellard     TranslationBlock *tb;
1310a513fe19Sbellard 
1311a513fe19Sbellard     if (nb_tbs <= 0)
1312a513fe19Sbellard         return NULL;
1313a513fe19Sbellard     if (tc_ptr < (unsigned long)code_gen_buffer ||
1314a513fe19Sbellard         tc_ptr >= (unsigned long)code_gen_ptr)
1315a513fe19Sbellard         return NULL;
1316a513fe19Sbellard     /* binary search (cf Knuth) */
1317a513fe19Sbellard     m_min = 0;
1318a513fe19Sbellard     m_max = nb_tbs - 1;
1319a513fe19Sbellard     while (m_min <= m_max) {
1320a513fe19Sbellard         m = (m_min + m_max) >> 1;
1321a513fe19Sbellard         tb = &tbs[m];
1322a513fe19Sbellard         v = (unsigned long)tb->tc_ptr;
1323a513fe19Sbellard         if (v == tc_ptr)
1324a513fe19Sbellard             return tb;
1325a513fe19Sbellard         else if (tc_ptr < v) {
1326a513fe19Sbellard             m_max = m - 1;
1327a513fe19Sbellard         } else {
1328a513fe19Sbellard             m_min = m + 1;
1329a513fe19Sbellard         }
1330a513fe19Sbellard     }
1331a513fe19Sbellard     return &tbs[m_max];
1332a513fe19Sbellard }
13337501267eSbellard 
1334ea041c0eSbellard static void tb_reset_jump_recursive(TranslationBlock *tb);
1335ea041c0eSbellard 
1336ea041c0eSbellard static inline void tb_reset_jump_recursive2(TranslationBlock *tb, int n)
1337ea041c0eSbellard {
1338ea041c0eSbellard     TranslationBlock *tb1, *tb_next, **ptb;
1339ea041c0eSbellard     unsigned int n1;
1340ea041c0eSbellard 
1341ea041c0eSbellard     tb1 = tb->jmp_next[n];
1342ea041c0eSbellard     if (tb1 != NULL) {
1343ea041c0eSbellard         /* find head of list */
1344ea041c0eSbellard         for(;;) {
1345ea041c0eSbellard             n1 = (long)tb1 & 3;
1346ea041c0eSbellard             tb1 = (TranslationBlock *)((long)tb1 & ~3);
1347ea041c0eSbellard             if (n1 == 2)
1348ea041c0eSbellard                 break;
1349ea041c0eSbellard             tb1 = tb1->jmp_next[n1];
1350ea041c0eSbellard         }
1351ea041c0eSbellard         /* we are now sure that tb jumps to tb1 */
1352ea041c0eSbellard         tb_next = tb1;
1353ea041c0eSbellard 
1354ea041c0eSbellard         /* remove tb from the jmp_first list */
1355ea041c0eSbellard         ptb = &tb_next->jmp_first;
1356ea041c0eSbellard         for(;;) {
1357ea041c0eSbellard             tb1 = *ptb;
1358ea041c0eSbellard             n1 = (long)tb1 & 3;
1359ea041c0eSbellard             tb1 = (TranslationBlock *)((long)tb1 & ~3);
1360ea041c0eSbellard             if (n1 == n && tb1 == tb)
1361ea041c0eSbellard                 break;
1362ea041c0eSbellard             ptb = &tb1->jmp_next[n1];
1363ea041c0eSbellard         }
1364ea041c0eSbellard         *ptb = tb->jmp_next[n];
1365ea041c0eSbellard         tb->jmp_next[n] = NULL;
1366ea041c0eSbellard 
1367ea041c0eSbellard         /* suppress the jump to next tb in generated code */
1368ea041c0eSbellard         tb_reset_jump(tb, n);
1369ea041c0eSbellard 
13700124311eSbellard         /* suppress jumps in the tb to which we could have jumped */
1371ea041c0eSbellard         tb_reset_jump_recursive(tb_next);
1372ea041c0eSbellard     }
1373ea041c0eSbellard }
1374ea041c0eSbellard 
1375ea041c0eSbellard static void tb_reset_jump_recursive(TranslationBlock *tb)
1376ea041c0eSbellard {
1377ea041c0eSbellard     tb_reset_jump_recursive2(tb, 0);
1378ea041c0eSbellard     tb_reset_jump_recursive2(tb, 1);
1379ea041c0eSbellard }
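
/* Editor's sketch (illustrative, not QEMU API): the incoming-jump list
   unlinked above is a singly linked chain of tagged pointers.  Low bits
   0/1 select which jmp_next slot of the predecessor continues the chain,
   and the sentinel tagged with 2 is the destination TB itself.  A plain
   traversal therefore looks like this: */
static void tb_for_each_incoming_jump(TranslationBlock *tb)
{
    TranslationBlock *tb1 = tb->jmp_first;

    while (((long)tb1 & 3) != 2) {
        unsigned int n1 = (long)tb1 & 3;
        tb1 = (TranslationBlock *)((long)tb1 & ~3);
        /* here, tb1 jumps to tb through its jmp_next[n1] slot */
        tb1 = tb1->jmp_next[n1];
    }
}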
1380ea041c0eSbellard 
13811fddef4bSbellard #if defined(TARGET_HAS_ICE)
138294df27fdSPaul Brook #if defined(CONFIG_USER_ONLY)
138394df27fdSPaul Brook static void breakpoint_invalidate(CPUState *env, target_ulong pc)
138494df27fdSPaul Brook {
138594df27fdSPaul Brook     tb_invalidate_phys_page_range(pc, pc + 1, 0);
138694df27fdSPaul Brook }
138794df27fdSPaul Brook #else
1388d720b93dSbellard static void breakpoint_invalidate(CPUState *env, target_ulong pc)
1389d720b93dSbellard {
1390c227f099SAnthony Liguori     target_phys_addr_t addr;
13919b3c35e0Sj_mayer     target_ulong pd;
1392c227f099SAnthony Liguori     ram_addr_t ram_addr;
1393c2f07f81Spbrook     PhysPageDesc *p;
1394d720b93dSbellard 
1395c2f07f81Spbrook     addr = cpu_get_phys_page_debug(env, pc);
1396c2f07f81Spbrook     p = phys_page_find(addr >> TARGET_PAGE_BITS);
1397c2f07f81Spbrook     if (!p) {
1398c2f07f81Spbrook         pd = IO_MEM_UNASSIGNED;
1399c2f07f81Spbrook     } else {
1400c2f07f81Spbrook         pd = p->phys_offset;
1401c2f07f81Spbrook     }
1402c2f07f81Spbrook     ram_addr = (pd & TARGET_PAGE_MASK) | (pc & ~TARGET_PAGE_MASK);
1403706cd4b5Spbrook     tb_invalidate_phys_page_range(ram_addr, ram_addr + 1, 0);
1404d720b93dSbellard }
1405c27004ecSbellard #endif
140694df27fdSPaul Brook #endif /* TARGET_HAS_ICE */
1407d720b93dSbellard 
1408c527ee8fSPaul Brook #if defined(CONFIG_USER_ONLY)
1409c527ee8fSPaul Brook void cpu_watchpoint_remove_all(CPUState *env, int mask)
1411c527ee8fSPaul Brook {
1412c527ee8fSPaul Brook }
1413c527ee8fSPaul Brook 
1414c527ee8fSPaul Brook int cpu_watchpoint_insert(CPUState *env, target_ulong addr, target_ulong len,
1415c527ee8fSPaul Brook                           int flags, CPUWatchpoint **watchpoint)
1416c527ee8fSPaul Brook {
1417c527ee8fSPaul Brook     return -ENOSYS;
1418c527ee8fSPaul Brook }
1419c527ee8fSPaul Brook #else
14206658ffb8Spbrook /* Add a watchpoint.  */
1421a1d1bb31Saliguori int cpu_watchpoint_insert(CPUState *env, target_ulong addr, target_ulong len,
1422a1d1bb31Saliguori                           int flags, CPUWatchpoint **watchpoint)
14236658ffb8Spbrook {
1424b4051334Saliguori     target_ulong len_mask = ~(len - 1);
1425c0ce998eSaliguori     CPUWatchpoint *wp;
14266658ffb8Spbrook 
1427b4051334Saliguori     /* sanity checks: allow power-of-2 lengths, deny unaligned watchpoints */
1428b4051334Saliguori     if ((len != 1 && len != 2 && len != 4 && len != 8) || (addr & ~len_mask)) {
1429b4051334Saliguori         fprintf(stderr, "qemu: tried to set invalid watchpoint at "
1430b4051334Saliguori                 TARGET_FMT_lx ", len=" TARGET_FMT_lu "\n", addr, len);
1431b4051334Saliguori         return -EINVAL;
1432b4051334Saliguori     }
1433a1d1bb31Saliguori     wp = qemu_malloc(sizeof(*wp));
14346658ffb8Spbrook 
1435a1d1bb31Saliguori     wp->vaddr = addr;
1436b4051334Saliguori     wp->len_mask = len_mask;
1437a1d1bb31Saliguori     wp->flags = flags;
1438a1d1bb31Saliguori 
14392dc9f411Saliguori     /* keep all GDB-injected watchpoints in front */
1440c0ce998eSaliguori     if (flags & BP_GDB)
144172cf2d4fSBlue Swirl         QTAILQ_INSERT_HEAD(&env->watchpoints, wp, entry);
1442c0ce998eSaliguori     else
144372cf2d4fSBlue Swirl         QTAILQ_INSERT_TAIL(&env->watchpoints, wp, entry);
1444a1d1bb31Saliguori 
14456658ffb8Spbrook     tlb_flush_page(env, addr);
1446a1d1bb31Saliguori 
1447a1d1bb31Saliguori     if (watchpoint)
1448a1d1bb31Saliguori         *watchpoint = wp;
1449a1d1bb31Saliguori     return 0;
14506658ffb8Spbrook }
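
/* Editor's usage sketch (hypothetical address and length): insert a
   4-byte write watchpoint, then drop it by reference.  The length must
   be a power of two and the address aligned to it, per the check above. */
static int watchpoint_example(CPUState *env)
{
    CPUWatchpoint *wp;
    int ret = cpu_watchpoint_insert(env, 0x1000, 4, BP_MEM_WRITE, &wp);

    if (ret == 0) {
        cpu_watchpoint_remove_by_ref(env, wp);
    }
    return ret;
}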
14516658ffb8Spbrook 
1452a1d1bb31Saliguori /* Remove a specific watchpoint.  */
1453a1d1bb31Saliguori int cpu_watchpoint_remove(CPUState *env, target_ulong addr, target_ulong len,
1454a1d1bb31Saliguori                           int flags)
14556658ffb8Spbrook {
1456b4051334Saliguori     target_ulong len_mask = ~(len - 1);
1457a1d1bb31Saliguori     CPUWatchpoint *wp;
14586658ffb8Spbrook 
145972cf2d4fSBlue Swirl     QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
1460b4051334Saliguori         if (addr == wp->vaddr && len_mask == wp->len_mask
14616e140f28Saliguori                 && flags == (wp->flags & ~BP_WATCHPOINT_HIT)) {
1462a1d1bb31Saliguori             cpu_watchpoint_remove_by_ref(env, wp);
14636658ffb8Spbrook             return 0;
14646658ffb8Spbrook         }
14656658ffb8Spbrook     }
1466a1d1bb31Saliguori     return -ENOENT;
14676658ffb8Spbrook }
14686658ffb8Spbrook 
1469a1d1bb31Saliguori /* Remove a specific watchpoint by reference.  */
1470a1d1bb31Saliguori void cpu_watchpoint_remove_by_ref(CPUState *env, CPUWatchpoint *watchpoint)
1471a1d1bb31Saliguori {
147272cf2d4fSBlue Swirl     QTAILQ_REMOVE(&env->watchpoints, watchpoint, entry);
14737d03f82fSedgar_igl 
1474a1d1bb31Saliguori     tlb_flush_page(env, watchpoint->vaddr);
1475a1d1bb31Saliguori 
1476a1d1bb31Saliguori     qemu_free(watchpoint);
14777d03f82fSedgar_igl }
14787d03f82fSedgar_igl 
1479a1d1bb31Saliguori /* Remove all matching watchpoints.  */
1480a1d1bb31Saliguori void cpu_watchpoint_remove_all(CPUState *env, int mask)
1481a1d1bb31Saliguori {
1482c0ce998eSaliguori     CPUWatchpoint *wp, *next;
1483a1d1bb31Saliguori 
148472cf2d4fSBlue Swirl     QTAILQ_FOREACH_SAFE(wp, &env->watchpoints, entry, next) {
1485a1d1bb31Saliguori         if (wp->flags & mask)
1486a1d1bb31Saliguori             cpu_watchpoint_remove_by_ref(env, wp);
1487a1d1bb31Saliguori     }
1488c0ce998eSaliguori }
1489c527ee8fSPaul Brook #endif
1490a1d1bb31Saliguori 
1491a1d1bb31Saliguori /* Add a breakpoint.  */
1492a1d1bb31Saliguori int cpu_breakpoint_insert(CPUState *env, target_ulong pc, int flags,
1493a1d1bb31Saliguori                           CPUBreakpoint **breakpoint)
14944c3a88a2Sbellard {
14951fddef4bSbellard #if defined(TARGET_HAS_ICE)
1496c0ce998eSaliguori     CPUBreakpoint *bp;
14974c3a88a2Sbellard 
1498a1d1bb31Saliguori     bp = qemu_malloc(sizeof(*bp));
14994c3a88a2Sbellard 
1500a1d1bb31Saliguori     bp->pc = pc;
1501a1d1bb31Saliguori     bp->flags = flags;
1502a1d1bb31Saliguori 
15032dc9f411Saliguori     /* keep all GDB-injected breakpoints in front */
1504c0ce998eSaliguori     if (flags & BP_GDB)
150572cf2d4fSBlue Swirl         QTAILQ_INSERT_HEAD(&env->breakpoints, bp, entry);
1506c0ce998eSaliguori     else
150772cf2d4fSBlue Swirl         QTAILQ_INSERT_TAIL(&env->breakpoints, bp, entry);
1508d720b93dSbellard 
1509d720b93dSbellard     breakpoint_invalidate(env, pc);
1510a1d1bb31Saliguori 
1511a1d1bb31Saliguori     if (breakpoint)
1512a1d1bb31Saliguori         *breakpoint = bp;
15134c3a88a2Sbellard     return 0;
15144c3a88a2Sbellard #else
1515a1d1bb31Saliguori     return -ENOSYS;
15164c3a88a2Sbellard #endif
15174c3a88a2Sbellard }
15184c3a88a2Sbellard 
1519a1d1bb31Saliguori /* Remove a specific breakpoint.  */
1520a1d1bb31Saliguori int cpu_breakpoint_remove(CPUState *env, target_ulong pc, int flags)
1521a1d1bb31Saliguori {
15227d03f82fSedgar_igl #if defined(TARGET_HAS_ICE)
1523a1d1bb31Saliguori     CPUBreakpoint *bp;
1524a1d1bb31Saliguori 
152572cf2d4fSBlue Swirl     QTAILQ_FOREACH(bp, &env->breakpoints, entry) {
1526a1d1bb31Saliguori         if (bp->pc == pc && bp->flags == flags) {
1527a1d1bb31Saliguori             cpu_breakpoint_remove_by_ref(env, bp);
1528a1d1bb31Saliguori             return 0;
15297d03f82fSedgar_igl         }
1530a1d1bb31Saliguori     }
1531a1d1bb31Saliguori     return -ENOENT;
1532a1d1bb31Saliguori #else
1533a1d1bb31Saliguori     return -ENOSYS;
15347d03f82fSedgar_igl #endif
15357d03f82fSedgar_igl }
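
/* Editor's usage sketch (hypothetical pc): plant a GDB-style breakpoint
   and remove it again by address and flags. */
static void breakpoint_example(CPUState *env, target_ulong pc)
{
    if (cpu_breakpoint_insert(env, pc, BP_GDB, NULL) == 0) {
        cpu_breakpoint_remove(env, pc, BP_GDB);
    }
}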
15367d03f82fSedgar_igl 
1537a1d1bb31Saliguori /* Remove a specific breakpoint by reference.  */
1538a1d1bb31Saliguori void cpu_breakpoint_remove_by_ref(CPUState *env, CPUBreakpoint *breakpoint)
15394c3a88a2Sbellard {
15401fddef4bSbellard #if defined(TARGET_HAS_ICE)
154172cf2d4fSBlue Swirl     QTAILQ_REMOVE(&env->breakpoints, breakpoint, entry);
1542d720b93dSbellard 
1543a1d1bb31Saliguori     breakpoint_invalidate(env, breakpoint->pc);
1544a1d1bb31Saliguori 
1545a1d1bb31Saliguori     qemu_free(breakpoint);
1546a1d1bb31Saliguori #endif
1547a1d1bb31Saliguori }
1548a1d1bb31Saliguori 
1549a1d1bb31Saliguori /* Remove all matching breakpoints. */
1550a1d1bb31Saliguori void cpu_breakpoint_remove_all(CPUState *env, int mask)
1551a1d1bb31Saliguori {
1552a1d1bb31Saliguori #if defined(TARGET_HAS_ICE)
1553c0ce998eSaliguori     CPUBreakpoint *bp, *next;
1554a1d1bb31Saliguori 
155572cf2d4fSBlue Swirl     QTAILQ_FOREACH_SAFE(bp, &env->breakpoints, entry, next) {
1556a1d1bb31Saliguori         if (bp->flags & mask)
1557a1d1bb31Saliguori             cpu_breakpoint_remove_by_ref(env, bp);
1558c0ce998eSaliguori     }
15594c3a88a2Sbellard #endif
15604c3a88a2Sbellard }
15614c3a88a2Sbellard 
1562c33a346eSbellard /* enable or disable single step mode. EXCP_DEBUG is returned by the
1563c33a346eSbellard    CPU loop after each instruction */
1564c33a346eSbellard void cpu_single_step(CPUState *env, int enabled)
1565c33a346eSbellard {
15661fddef4bSbellard #if defined(TARGET_HAS_ICE)
1567c33a346eSbellard     if (env->singlestep_enabled != enabled) {
1568c33a346eSbellard         env->singlestep_enabled = enabled;
1569e22a25c9Saliguori         if (kvm_enabled())
1570e22a25c9Saliguori             kvm_update_guest_debug(env, 0);
1571e22a25c9Saliguori         else {
1572ccbb4d44SStuart Brady             /* must flush all the translated code to avoid inconsistencies */
15739fa3e853Sbellard             /* XXX: only flush what is necessary */
15740124311eSbellard             tb_flush(env);
1575c33a346eSbellard         }
1576e22a25c9Saliguori     }
1577c33a346eSbellard #endif
1578c33a346eSbellard }
1579c33a346eSbellard 
158034865134Sbellard /* enable or disable low-level logging */
158134865134Sbellard void cpu_set_log(int log_flags)
158234865134Sbellard {
158334865134Sbellard     loglevel = log_flags;
158434865134Sbellard     if (loglevel && !logfile) {
158511fcfab4Spbrook         logfile = fopen(logfilename, log_append ? "a" : "w");
158634865134Sbellard         if (!logfile) {
158734865134Sbellard             perror(logfilename);
158834865134Sbellard             _exit(1);
158934865134Sbellard         }
15909fa3e853Sbellard #if !defined(CONFIG_SOFTMMU)
15919fa3e853Sbellard         /* must avoid glibc's use of mmap() by setting a buffer "by hand" */
15929fa3e853Sbellard         {
1593b55266b5Sblueswir1             static char logfile_buf[4096];
15949fa3e853Sbellard             setvbuf(logfile, logfile_buf, _IOLBF, sizeof(logfile_buf));
15959fa3e853Sbellard         }
1596bf65f53fSFilip Navara #elif !defined(_WIN32)
1597bf65f53fSFilip Navara         /* Win32 doesn't support line-buffering and requires size >= 2 */
159834865134Sbellard         setvbuf(logfile, NULL, _IOLBF, 0);
15999fa3e853Sbellard #endif
1600e735b91cSpbrook         log_append = 1;
1601e735b91cSpbrook     }
1602e735b91cSpbrook     if (!loglevel && logfile) {
1603e735b91cSpbrook         fclose(logfile);
1604e735b91cSpbrook         logfile = NULL;
160534865134Sbellard     }
160634865134Sbellard }
160734865134Sbellard 
160834865134Sbellard void cpu_set_log_filename(const char *filename)
160934865134Sbellard {
161034865134Sbellard     logfilename = strdup(filename);
1611e735b91cSpbrook     if (logfile) {
1612e735b91cSpbrook         fclose(logfile);
1613e735b91cSpbrook         logfile = NULL;
1614e735b91cSpbrook     }
1615e735b91cSpbrook     cpu_set_log(loglevel);
161634865134Sbellard }
1617c33a346eSbellard 
16183098dba0Saurel32 static void cpu_unlink_tb(CPUState *env)
1619ea041c0eSbellard {
1620d5975363Spbrook     /* FIXME: TB unchaining isn't SMP safe.  For now just ignore the
1621d5975363Spbrook        problem and hope the cpu will stop of its own accord.  For userspace
1622d5975363Spbrook        emulation this often isn't actually as bad as it sounds.  Often
1623d5975363Spbrook        signals are used primarily to interrupt blocking syscalls.  */
16243098dba0Saurel32     TranslationBlock *tb;
1625c227f099SAnthony Liguori     static spinlock_t interrupt_lock = SPIN_LOCK_UNLOCKED;
16263098dba0Saurel32 
1627cab1b4bdSRiku Voipio     spin_lock(&interrupt_lock);
16283098dba0Saurel32     tb = env->current_tb;
16293098dba0Saurel32     /* if the cpu is currently executing code, we must unlink it and
16303098dba0Saurel32        all the potentially executing TBs */
1631f76cfe56SRiku Voipio     if (tb) {
16323098dba0Saurel32         env->current_tb = NULL;
16333098dba0Saurel32         tb_reset_jump_recursive(tb);
16343098dba0Saurel32     }
1635cab1b4bdSRiku Voipio     spin_unlock(&interrupt_lock);
16363098dba0Saurel32 }
16373098dba0Saurel32 
163897ffbd8dSJan Kiszka #ifndef CONFIG_USER_ONLY
16393098dba0Saurel32 /* mask must never be zero, except for the A20 change call */
1640ec6959d0SJan Kiszka static void tcg_handle_interrupt(CPUState *env, int mask)
16413098dba0Saurel32 {
16423098dba0Saurel32     int old_mask;
16433098dba0Saurel32 
16443098dba0Saurel32     old_mask = env->interrupt_request;
16453098dba0Saurel32     env->interrupt_request |= mask;
16463098dba0Saurel32 
16478edac960Saliguori     /*
16488edac960Saliguori      * If called from iothread context, wake the target cpu in
16498edac960Saliguori      * case it's halted.
16508edac960Saliguori      */
1651b7680cb6SJan Kiszka     if (!qemu_cpu_is_self(env)) {
16528edac960Saliguori         qemu_cpu_kick(env);
16538edac960Saliguori         return;
16548edac960Saliguori     }
16558edac960Saliguori 
16562e70f6efSpbrook     if (use_icount) {
1657266910c4Spbrook         env->icount_decr.u16.high = 0xffff;
16582e70f6efSpbrook         if (!can_do_io(env)
1659be214e6cSaurel32             && (mask & ~old_mask) != 0) {
16602e70f6efSpbrook             cpu_abort(env, "Raised interrupt while not in I/O function");
16612e70f6efSpbrook         }
16622e70f6efSpbrook     } else {
16633098dba0Saurel32         cpu_unlink_tb(env);
1664ea041c0eSbellard     }
16652e70f6efSpbrook }
1666ea041c0eSbellard 
1667ec6959d0SJan Kiszka CPUInterruptHandler cpu_interrupt_handler = tcg_handle_interrupt;
1668ec6959d0SJan Kiszka 
166997ffbd8dSJan Kiszka #else /* CONFIG_USER_ONLY */
167097ffbd8dSJan Kiszka 
167197ffbd8dSJan Kiszka void cpu_interrupt(CPUState *env, int mask)
167297ffbd8dSJan Kiszka {
167397ffbd8dSJan Kiszka     env->interrupt_request |= mask;
167497ffbd8dSJan Kiszka     cpu_unlink_tb(env);
167597ffbd8dSJan Kiszka }
167697ffbd8dSJan Kiszka #endif /* CONFIG_USER_ONLY */
167797ffbd8dSJan Kiszka 
1678b54ad049Sbellard void cpu_reset_interrupt(CPUState *env, int mask)
1679b54ad049Sbellard {
1680b54ad049Sbellard     env->interrupt_request &= ~mask;
1681b54ad049Sbellard }
1682b54ad049Sbellard 
16833098dba0Saurel32 void cpu_exit(CPUState *env)
16843098dba0Saurel32 {
16853098dba0Saurel32     env->exit_request = 1;
16863098dba0Saurel32     cpu_unlink_tb(env);
16873098dba0Saurel32 }
16883098dba0Saurel32 
1689c7cd6a37Sblueswir1 const CPULogItem cpu_log_items[] = {
1690f193c797Sbellard     { CPU_LOG_TB_OUT_ASM, "out_asm",
1691f193c797Sbellard       "show generated host assembly code for each compiled TB" },
1692f193c797Sbellard     { CPU_LOG_TB_IN_ASM, "in_asm",
1693f193c797Sbellard       "show target assembly code for each compiled TB" },
1694f193c797Sbellard     { CPU_LOG_TB_OP, "op",
169557fec1feSbellard       "show micro ops for each compiled TB" },
1696f193c797Sbellard     { CPU_LOG_TB_OP_OPT, "op_opt",
1697e01a1157Sblueswir1       "show micro ops "
1698e01a1157Sblueswir1 #ifdef TARGET_I386
1699e01a1157Sblueswir1       "before eflags optimization and "
1700f193c797Sbellard #endif
1701e01a1157Sblueswir1       "after liveness analysis" },
1702f193c797Sbellard     { CPU_LOG_INT, "int",
1703f193c797Sbellard       "show interrupts/exceptions in short format" },
1704f193c797Sbellard     { CPU_LOG_EXEC, "exec",
1705f193c797Sbellard       "show trace before each executed TB (lots of logs)" },
17069fddaa0cSbellard     { CPU_LOG_TB_CPU, "cpu",
1707e91c8a77Sths       "show CPU state before block translation" },
1708f193c797Sbellard #ifdef TARGET_I386
1709f193c797Sbellard     { CPU_LOG_PCALL, "pcall",
1710f193c797Sbellard       "show protected mode far calls/returns/exceptions" },
1711eca1bdf4Saliguori     { CPU_LOG_RESET, "cpu_reset",
1712eca1bdf4Saliguori       "show CPU state before CPU resets" },
1713f193c797Sbellard #endif
17148e3a9fd2Sbellard #ifdef DEBUG_IOPORT
1715fd872598Sbellard     { CPU_LOG_IOPORT, "ioport",
1716fd872598Sbellard       "show all i/o ports accesses" },
17178e3a9fd2Sbellard #endif
1718f193c797Sbellard     { 0, NULL, NULL },
1719f193c797Sbellard };
1720f193c797Sbellard 
1721f6f3fbcaSMichael S. Tsirkin #ifndef CONFIG_USER_ONLY
1722f6f3fbcaSMichael S. Tsirkin static QLIST_HEAD(memory_client_list, CPUPhysMemoryClient) memory_client_list
1723f6f3fbcaSMichael S. Tsirkin     = QLIST_HEAD_INITIALIZER(memory_client_list);
1724f6f3fbcaSMichael S. Tsirkin 
1725f6f3fbcaSMichael S. Tsirkin static void cpu_notify_set_memory(target_phys_addr_t start_addr,
1726f6f3fbcaSMichael S. Tsirkin                                   ram_addr_t size,
17270fd542fbSMichael S. Tsirkin                                   ram_addr_t phys_offset,
17280fd542fbSMichael S. Tsirkin                                   bool log_dirty)
1729f6f3fbcaSMichael S. Tsirkin {
1730f6f3fbcaSMichael S. Tsirkin     CPUPhysMemoryClient *client;
1731f6f3fbcaSMichael S. Tsirkin     QLIST_FOREACH(client, &memory_client_list, list) {
17320fd542fbSMichael S. Tsirkin         client->set_memory(client, start_addr, size, phys_offset, log_dirty);
1733f6f3fbcaSMichael S. Tsirkin     }
1734f6f3fbcaSMichael S. Tsirkin }
1735f6f3fbcaSMichael S. Tsirkin 
1736f6f3fbcaSMichael S. Tsirkin static int cpu_notify_sync_dirty_bitmap(target_phys_addr_t start,
1737f6f3fbcaSMichael S. Tsirkin                                         target_phys_addr_t end)
1738f6f3fbcaSMichael S. Tsirkin {
1739f6f3fbcaSMichael S. Tsirkin     CPUPhysMemoryClient *client;
1740f6f3fbcaSMichael S. Tsirkin     QLIST_FOREACH(client, &memory_client_list, list) {
1741f6f3fbcaSMichael S. Tsirkin         int r = client->sync_dirty_bitmap(client, start, end);
1742f6f3fbcaSMichael S. Tsirkin         if (r < 0)
1743f6f3fbcaSMichael S. Tsirkin             return r;
1744f6f3fbcaSMichael S. Tsirkin     }
1745f6f3fbcaSMichael S. Tsirkin     return 0;
1746f6f3fbcaSMichael S. Tsirkin }
1747f6f3fbcaSMichael S. Tsirkin 
1748f6f3fbcaSMichael S. Tsirkin static int cpu_notify_migration_log(int enable)
1749f6f3fbcaSMichael S. Tsirkin {
1750f6f3fbcaSMichael S. Tsirkin     CPUPhysMemoryClient *client;
1751f6f3fbcaSMichael S. Tsirkin     QLIST_FOREACH(client, &memory_client_list, list) {
1752f6f3fbcaSMichael S. Tsirkin         int r = client->migration_log(client, enable);
1753f6f3fbcaSMichael S. Tsirkin         if (r < 0)
1754f6f3fbcaSMichael S. Tsirkin             return r;
1755f6f3fbcaSMichael S. Tsirkin     }
1756f6f3fbcaSMichael S. Tsirkin     return 0;
1757f6f3fbcaSMichael S. Tsirkin }
1758f6f3fbcaSMichael S. Tsirkin 
17592173a75fSAlex Williamson struct last_map {
17602173a75fSAlex Williamson     target_phys_addr_t start_addr;
17612173a75fSAlex Williamson     ram_addr_t size;
17622173a75fSAlex Williamson     ram_addr_t phys_offset;
17632173a75fSAlex Williamson };
17642173a75fSAlex Williamson 
17658d4c78e7SAlex Williamson /* The l1_phys_map provides the upper P_L1_BITs of the guest physical
17668d4c78e7SAlex Williamson  * address.  Each intermediate table provides the next L2_BITs of guest
17678d4c78e7SAlex Williamson  * physical address space.  The number of levels varies based on host and
17688d4c78e7SAlex Williamson  * guest configuration, making it efficient to build the final guest
17698d4c78e7SAlex Williamson  * physical address by seeding the L1 offset and shifting and adding in
17708d4c78e7SAlex Williamson  * each L2 offset as we recurse through them. */
17712173a75fSAlex Williamson static void phys_page_for_each_1(CPUPhysMemoryClient *client, int level,
17722173a75fSAlex Williamson                                  void **lp, target_phys_addr_t addr,
17732173a75fSAlex Williamson                                  struct last_map *map)
1774f6f3fbcaSMichael S. Tsirkin {
17755cd2c5b6SRichard Henderson     int i;
1776f6f3fbcaSMichael S. Tsirkin 
17775cd2c5b6SRichard Henderson     if (*lp == NULL) {
17785cd2c5b6SRichard Henderson         return;
1779f6f3fbcaSMichael S. Tsirkin     }
17805cd2c5b6SRichard Henderson     if (level == 0) {
17815cd2c5b6SRichard Henderson         PhysPageDesc *pd = *lp;
17828d4c78e7SAlex Williamson         addr <<= L2_BITS + TARGET_PAGE_BITS;
17837296abacSPaul Brook         for (i = 0; i < L2_SIZE; ++i) {
17845cd2c5b6SRichard Henderson             if (pd[i].phys_offset != IO_MEM_UNASSIGNED) {
17852173a75fSAlex Williamson                 target_phys_addr_t start_addr = addr | i << TARGET_PAGE_BITS;
17862173a75fSAlex Williamson 
17872173a75fSAlex Williamson                 if (map->size &&
17882173a75fSAlex Williamson                     start_addr == map->start_addr + map->size &&
17892173a75fSAlex Williamson                     pd[i].phys_offset == map->phys_offset + map->size) {
17902173a75fSAlex Williamson 
17912173a75fSAlex Williamson                     map->size += TARGET_PAGE_SIZE;
17922173a75fSAlex Williamson                     continue;
17932173a75fSAlex Williamson                 } else if (map->size) {
17942173a75fSAlex Williamson                     client->set_memory(client, map->start_addr,
17952173a75fSAlex Williamson                                        map->size, map->phys_offset, false);
17962173a75fSAlex Williamson                 }
17972173a75fSAlex Williamson 
17982173a75fSAlex Williamson                 map->start_addr = start_addr;
17992173a75fSAlex Williamson                 map->size = TARGET_PAGE_SIZE;
18002173a75fSAlex Williamson                 map->phys_offset = pd[i].phys_offset;
1801f6f3fbcaSMichael S. Tsirkin             }
18025cd2c5b6SRichard Henderson         }
18035cd2c5b6SRichard Henderson     } else {
18045cd2c5b6SRichard Henderson         void **pp = *lp;
18057296abacSPaul Brook         for (i = 0; i < L2_SIZE; ++i) {
18068d4c78e7SAlex Williamson             phys_page_for_each_1(client, level - 1, pp + i,
18072173a75fSAlex Williamson                                  (addr << L2_BITS) | i, map);
1808f6f3fbcaSMichael S. Tsirkin         }
1809f6f3fbcaSMichael S. Tsirkin     }
1810f6f3fbcaSMichael S. Tsirkin }
1811f6f3fbcaSMichael S. Tsirkin 
1812f6f3fbcaSMichael S. Tsirkin static void phys_page_for_each(CPUPhysMemoryClient *client)
1813f6f3fbcaSMichael S. Tsirkin {
18145cd2c5b6SRichard Henderson     int i;
18152173a75fSAlex Williamson     struct last_map map = { };
18162173a75fSAlex Williamson 
18175cd2c5b6SRichard Henderson     for (i = 0; i < P_L1_SIZE; ++i) {
18185cd2c5b6SRichard Henderson         phys_page_for_each_1(client, P_L1_SHIFT / L2_BITS - 1,
18192173a75fSAlex Williamson                              l1_phys_map + i, i, &map);
18202173a75fSAlex Williamson     }
18212173a75fSAlex Williamson     if (map.size) {
18222173a75fSAlex Williamson         client->set_memory(client, map.start_addr, map.size, map.phys_offset,
18232173a75fSAlex Williamson                            false);
1824f6f3fbcaSMichael S. Tsirkin     }
1825f6f3fbcaSMichael S. Tsirkin }
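
/* Editor's note (illustrative numbers): the last_map accumulator above
   coalesces runs of pages whose guest addresses and phys_offsets are both
   contiguous.  Three consecutive pages at 0x1000/0x2000/0x3000 backed by
   consecutive offsets are reported as one set_memory(start=0x1000,
   size=0x3000) call; a hole or a discontiguous phys_offset flushes the
   pending range and starts a new one. */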
1826f6f3fbcaSMichael S. Tsirkin 
1827f6f3fbcaSMichael S. Tsirkin void cpu_register_phys_memory_client(CPUPhysMemoryClient *client)
1828f6f3fbcaSMichael S. Tsirkin {
1829f6f3fbcaSMichael S. Tsirkin     QLIST_INSERT_HEAD(&memory_client_list, client, list);
1830f6f3fbcaSMichael S. Tsirkin     phys_page_for_each(client);
1831f6f3fbcaSMichael S. Tsirkin }
1832f6f3fbcaSMichael S. Tsirkin 
1833f6f3fbcaSMichael S. Tsirkin void cpu_unregister_phys_memory_client(CPUPhysMemoryClient *client)
1834f6f3fbcaSMichael S. Tsirkin {
1835f6f3fbcaSMichael S. Tsirkin     QLIST_REMOVE(client, list);
1836f6f3fbcaSMichael S. Tsirkin }
1837f6f3fbcaSMichael S. Tsirkin #endif
1838f6f3fbcaSMichael S. Tsirkin 
1839f193c797Sbellard static int cmp1(const char *s1, int n, const char *s2)
1840f193c797Sbellard {
1841f193c797Sbellard     if (strlen(s2) != n)
1842f193c797Sbellard         return 0;
1843f193c797Sbellard     return memcmp(s1, s2, n) == 0;
1844f193c797Sbellard }
1845f193c797Sbellard 
1846f193c797Sbellard /* takes a comma-separated list of log masks. Returns 0 on error. */
1847f193c797Sbellard int cpu_str_to_log_mask(const char *str)
1848f193c797Sbellard {
1849c7cd6a37Sblueswir1     const CPULogItem *item;
1850f193c797Sbellard     int mask;
1851f193c797Sbellard     const char *p, *p1;
1852f193c797Sbellard 
1853f193c797Sbellard     p = str;
1854f193c797Sbellard     mask = 0;
1855f193c797Sbellard     for(;;) {
1856f193c797Sbellard         p1 = strchr(p, ',');
1857f193c797Sbellard         if (!p1)
1858f193c797Sbellard             p1 = p + strlen(p);
18598e3a9fd2Sbellard         if(cmp1(p,p1-p,"all")) {
18608e3a9fd2Sbellard             for(item = cpu_log_items; item->mask != 0; item++) {
18618e3a9fd2Sbellard                 mask |= item->mask;
18628e3a9fd2Sbellard             }
18638e3a9fd2Sbellard         } else {
1864f193c797Sbellard             for(item = cpu_log_items; item->mask != 0; item++) {
1865f193c797Sbellard                 if (cmp1(p, p1 - p, item->name))
1866f193c797Sbellard                     goto found;
1867f193c797Sbellard             }
1868f193c797Sbellard             return 0;
18698e3a9fd2Sbellard         }
1870f193c797Sbellard     found:
1871f193c797Sbellard         mask |= item->mask;
1872f193c797Sbellard         if (*p1 != ',')
1873f193c797Sbellard             break;
1874f193c797Sbellard         p = p1 + 1;
1875f193c797Sbellard     }
1876f193c797Sbellard     return mask;
1877f193c797Sbellard }
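
/* Editor's usage sketch: parse a "-d"-style option string.  "all" selects
   every entry of cpu_log_items; any unknown name makes the parse fail. */
static int set_log_from_option(const char *opt)
{
    int mask = cpu_str_to_log_mask(opt);   /* e.g. "in_asm,cpu" */

    if (!mask) {
        return -1;                         /* unknown log item */
    }
    cpu_set_log(mask);
    return 0;
}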
1878ea041c0eSbellard 
18797501267eSbellard void cpu_abort(CPUState *env, const char *fmt, ...)
18807501267eSbellard {
18817501267eSbellard     va_list ap;
1882493ae1f0Spbrook     va_list ap2;
18837501267eSbellard 
18847501267eSbellard     va_start(ap, fmt);
1885493ae1f0Spbrook     va_copy(ap2, ap);
18867501267eSbellard     fprintf(stderr, "qemu: fatal: ");
18877501267eSbellard     vfprintf(stderr, fmt, ap);
18887501267eSbellard     fprintf(stderr, "\n");
18897501267eSbellard #ifdef TARGET_I386
18907fe48483Sbellard     cpu_dump_state(env, stderr, fprintf, X86_DUMP_FPU | X86_DUMP_CCOP);
18917fe48483Sbellard #else
18927fe48483Sbellard     cpu_dump_state(env, stderr, fprintf, 0);
18937501267eSbellard #endif
189493fcfe39Saliguori     if (qemu_log_enabled()) {
189593fcfe39Saliguori         qemu_log("qemu: fatal: ");
189693fcfe39Saliguori         qemu_log_vprintf(fmt, ap2);
189793fcfe39Saliguori         qemu_log("\n");
1898f9373291Sj_mayer #ifdef TARGET_I386
189993fcfe39Saliguori         log_cpu_state(env, X86_DUMP_FPU | X86_DUMP_CCOP);
1900f9373291Sj_mayer #else
190193fcfe39Saliguori         log_cpu_state(env, 0);
1902f9373291Sj_mayer #endif
190331b1a7b4Saliguori         qemu_log_flush();
190493fcfe39Saliguori         qemu_log_close();
1905924edcaeSbalrog     }
1906493ae1f0Spbrook     va_end(ap2);
1907f9373291Sj_mayer     va_end(ap);
1908fd052bf6SRiku Voipio #if defined(CONFIG_USER_ONLY)
1909fd052bf6SRiku Voipio     {
1910fd052bf6SRiku Voipio         struct sigaction act;
1911fd052bf6SRiku Voipio         sigfillset(&act.sa_mask);
1912fd052bf6SRiku Voipio         act.sa_handler = SIG_DFL;
1913fd052bf6SRiku Voipio         sigaction(SIGABRT, &act, NULL);
1914fd052bf6SRiku Voipio     }
1915fd052bf6SRiku Voipio #endif
19167501267eSbellard     abort();
19177501267eSbellard }
19187501267eSbellard 
1919c5be9f08Sths CPUState *cpu_copy(CPUState *env)
1920c5be9f08Sths {
192101ba9816Sths     CPUState *new_env = cpu_init(env->cpu_model_str);
1922c5be9f08Sths     CPUState *next_cpu = new_env->next_cpu;
1923c5be9f08Sths     int cpu_index = new_env->cpu_index;
19245a38f081Saliguori #if defined(TARGET_HAS_ICE)
19255a38f081Saliguori     CPUBreakpoint *bp;
19265a38f081Saliguori     CPUWatchpoint *wp;
19275a38f081Saliguori #endif
19285a38f081Saliguori 
1929c5be9f08Sths     memcpy(new_env, env, sizeof(CPUState));
19305a38f081Saliguori 
19315a38f081Saliguori     /* Preserve chaining and index. */
1932c5be9f08Sths     new_env->next_cpu = next_cpu;
1933c5be9f08Sths     new_env->cpu_index = cpu_index;
19345a38f081Saliguori 
19355a38f081Saliguori     /* Clone all break/watchpoints.
19365a38f081Saliguori        Note: Once we support ptrace with hw-debug register access, make sure
19375a38f081Saliguori        BP_CPU break/watchpoints are handled correctly on clone. */
193872cf2d4fSBlue Swirl     QTAILQ_INIT(&env->breakpoints);
193972cf2d4fSBlue Swirl     QTAILQ_INIT(&env->watchpoints);
19405a38f081Saliguori #if defined(TARGET_HAS_ICE)
194172cf2d4fSBlue Swirl     QTAILQ_FOREACH(bp, &env->breakpoints, entry) {
19425a38f081Saliguori         cpu_breakpoint_insert(new_env, bp->pc, bp->flags, NULL);
19435a38f081Saliguori     }
194472cf2d4fSBlue Swirl     QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
19455a38f081Saliguori         cpu_watchpoint_insert(new_env, wp->vaddr, (~wp->len_mask) + 1,
19465a38f081Saliguori                               wp->flags, NULL);
19475a38f081Saliguori     }
19485a38f081Saliguori #endif
19495a38f081Saliguori 
1950c5be9f08Sths     return new_env;
1951c5be9f08Sths }
1952c5be9f08Sths 
19530124311eSbellard #if !defined(CONFIG_USER_ONLY)
19540124311eSbellard 
19555c751e99Sedgar_igl static inline void tlb_flush_jmp_cache(CPUState *env, target_ulong addr)
19565c751e99Sedgar_igl {
19575c751e99Sedgar_igl     unsigned int i;
19585c751e99Sedgar_igl 
19595c751e99Sedgar_igl     /* Discard jump cache entries for any tb that might overlap the
19605c751e99Sedgar_igl        flushed page.  */
19615c751e99Sedgar_igl     i = tb_jmp_cache_hash_page(addr - TARGET_PAGE_SIZE);
19625c751e99Sedgar_igl     memset (&env->tb_jmp_cache[i], 0,
19635c751e99Sedgar_igl             TB_JMP_PAGE_SIZE * sizeof(TranslationBlock *));
19645c751e99Sedgar_igl 
19655c751e99Sedgar_igl     i = tb_jmp_cache_hash_page(addr);
19665c751e99Sedgar_igl     memset (&env->tb_jmp_cache[i], 0,
19675c751e99Sedgar_igl             TB_JMP_PAGE_SIZE * sizeof(TranslationBlock *));
19685c751e99Sedgar_igl }
19695c751e99Sedgar_igl 
197008738984SIgor Kovalenko static CPUTLBEntry s_cputlb_empty_entry = {
197108738984SIgor Kovalenko     .addr_read  = -1,
197208738984SIgor Kovalenko     .addr_write = -1,
197308738984SIgor Kovalenko     .addr_code  = -1,
197408738984SIgor Kovalenko     .addend     = -1,
197508738984SIgor Kovalenko };
197608738984SIgor Kovalenko 
1977ee8b7021Sbellard /* NOTE: if flush_global is true, also flush global entries (not
1978ee8b7021Sbellard    implemented yet) */
1979ee8b7021Sbellard void tlb_flush(CPUState *env, int flush_global)
198033417e70Sbellard {
198133417e70Sbellard     int i;
19820124311eSbellard 
19839fa3e853Sbellard #if defined(DEBUG_TLB)
19849fa3e853Sbellard     printf("tlb_flush:\n");
19859fa3e853Sbellard #endif
19860124311eSbellard     /* must reset current TB so that interrupts cannot modify the
19870124311eSbellard        links while we are modifying them */
19880124311eSbellard     env->current_tb = NULL;
19890124311eSbellard 
199033417e70Sbellard     for(i = 0; i < CPU_TLB_SIZE; i++) {
1991cfde4bd9SIsaku Yamahata         int mmu_idx;
1992cfde4bd9SIsaku Yamahata         for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) {
199308738984SIgor Kovalenko             env->tlb_table[mmu_idx][i] = s_cputlb_empty_entry;
1994cfde4bd9SIsaku Yamahata         }
199533417e70Sbellard     }
19969fa3e853Sbellard 
19978a40a180Sbellard     memset (env->tb_jmp_cache, 0, TB_JMP_CACHE_SIZE * sizeof (void *));
19989fa3e853Sbellard 
1999d4c430a8SPaul Brook     env->tlb_flush_addr = -1;
2000d4c430a8SPaul Brook     env->tlb_flush_mask = 0;
2001e3db7226Sbellard     tlb_flush_count++;
200233417e70Sbellard }
200333417e70Sbellard 
2004274da6b2Sbellard static inline void tlb_flush_entry(CPUTLBEntry *tlb_entry, target_ulong addr)
200561382a50Sbellard {
200684b7b8e7Sbellard     if (addr == (tlb_entry->addr_read &
200784b7b8e7Sbellard                  (TARGET_PAGE_MASK | TLB_INVALID_MASK)) ||
200884b7b8e7Sbellard         addr == (tlb_entry->addr_write &
200984b7b8e7Sbellard                  (TARGET_PAGE_MASK | TLB_INVALID_MASK)) ||
201084b7b8e7Sbellard         addr == (tlb_entry->addr_code &
201184b7b8e7Sbellard                  (TARGET_PAGE_MASK | TLB_INVALID_MASK))) {
201208738984SIgor Kovalenko         *tlb_entry = s_cputlb_empty_entry;
201384b7b8e7Sbellard     }
201461382a50Sbellard }
201561382a50Sbellard 
20162e12669aSbellard void tlb_flush_page(CPUState *env, target_ulong addr)
201733417e70Sbellard {
20188a40a180Sbellard     int i;
2019cfde4bd9SIsaku Yamahata     int mmu_idx;
20200124311eSbellard 
20219fa3e853Sbellard #if defined(DEBUG_TLB)
2022108c49b8Sbellard     printf("tlb_flush_page: " TARGET_FMT_lx "\n", addr);
20239fa3e853Sbellard #endif
2024d4c430a8SPaul Brook     /* Check if we need to flush due to large pages.  */
2025d4c430a8SPaul Brook     if ((addr & env->tlb_flush_mask) == env->tlb_flush_addr) {
2026d4c430a8SPaul Brook #if defined(DEBUG_TLB)
2027d4c430a8SPaul Brook         printf("tlb_flush_page: forced full flush ("
2028d4c430a8SPaul Brook                TARGET_FMT_lx "/" TARGET_FMT_lx ")\n",
2029d4c430a8SPaul Brook                env->tlb_flush_addr, env->tlb_flush_mask);
2030d4c430a8SPaul Brook #endif
2031d4c430a8SPaul Brook         tlb_flush(env, 1);
2032d4c430a8SPaul Brook         return;
2033d4c430a8SPaul Brook     }
20340124311eSbellard     /* must reset current TB so that interrupts cannot modify the
20350124311eSbellard        links while we are modifying them */
20360124311eSbellard     env->current_tb = NULL;
203733417e70Sbellard 
203861382a50Sbellard     addr &= TARGET_PAGE_MASK;
203933417e70Sbellard     i = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
2040cfde4bd9SIsaku Yamahata     for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++)
2041cfde4bd9SIsaku Yamahata         tlb_flush_entry(&env->tlb_table[mmu_idx][i], addr);
20420124311eSbellard 
20435c751e99Sedgar_igl     tlb_flush_jmp_cache(env, addr);
20449fa3e853Sbellard }
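
/* Editor's sketch (assumption, not QEMU API): the direct-mapped slot
   computation used above, factored out.  Only the address bits just above
   the page offset select the slot, so pages CPU_TLB_SIZE pages apart
   alias to the same entry. */
static inline int tlb_slot_index(target_ulong addr)
{
    return (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
}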
20459fa3e853Sbellard 
20469fa3e853Sbellard /* update the TLBs so that writes to code in the virtual page 'addr'
20479fa3e853Sbellard    can be detected */
2048c227f099SAnthony Liguori static void tlb_protect_code(ram_addr_t ram_addr)
204961382a50Sbellard {
20506a00d601Sbellard     cpu_physical_memory_reset_dirty(ram_addr,
20516a00d601Sbellard                                     ram_addr + TARGET_PAGE_SIZE,
20526a00d601Sbellard                                     CODE_DIRTY_FLAG);
20539fa3e853Sbellard }
20549fa3e853Sbellard 
20559fa3e853Sbellard /* update the TLB so that writes in physical page 'phys_addr' are no longer
20563a7d929eSbellard    tested for self-modifying code */
2057c227f099SAnthony Liguori static void tlb_unprotect_code_phys(CPUState *env, ram_addr_t ram_addr,
20583a7d929eSbellard                                     target_ulong vaddr)
20599fa3e853Sbellard {
2060f7c11b53SYoshiaki Tamura     cpu_physical_memory_set_dirty_flags(ram_addr, CODE_DIRTY_FLAG);
20619fa3e853Sbellard }
20629fa3e853Sbellard 
20631ccde1cbSbellard static inline void tlb_reset_dirty_range(CPUTLBEntry *tlb_entry,
20641ccde1cbSbellard                                          unsigned long start, unsigned long length)
20651ccde1cbSbellard {
20661ccde1cbSbellard     unsigned long addr;
206784b7b8e7Sbellard     if ((tlb_entry->addr_write & ~TARGET_PAGE_MASK) == IO_MEM_RAM) {
206884b7b8e7Sbellard         addr = (tlb_entry->addr_write & TARGET_PAGE_MASK) + tlb_entry->addend;
20691ccde1cbSbellard         if ((addr - start) < length) {
20700f459d16Spbrook             tlb_entry->addr_write = (tlb_entry->addr_write & TARGET_PAGE_MASK) | TLB_NOTDIRTY;
20711ccde1cbSbellard         }
20721ccde1cbSbellard     }
20731ccde1cbSbellard }
20741ccde1cbSbellard 
20755579c7f3Spbrook /* Note: start and end must be within the same ram block.  */
2076c227f099SAnthony Liguori void cpu_physical_memory_reset_dirty(ram_addr_t start, ram_addr_t end,
20770a962c02Sbellard                                      int dirty_flags)
20781ccde1cbSbellard {
20791ccde1cbSbellard     CPUState *env;
20804f2ac237Sbellard     unsigned long length, start1;
2081f7c11b53SYoshiaki Tamura     int i;
20821ccde1cbSbellard 
20831ccde1cbSbellard     start &= TARGET_PAGE_MASK;
20841ccde1cbSbellard     end = TARGET_PAGE_ALIGN(end);
20851ccde1cbSbellard 
20861ccde1cbSbellard     length = end - start;
20871ccde1cbSbellard     if (length == 0)
20881ccde1cbSbellard         return;
2089f7c11b53SYoshiaki Tamura     cpu_physical_memory_mask_dirty_range(start, length, dirty_flags);
2090f23db169Sbellard 
20911ccde1cbSbellard     /* we modify the TLB cache so that the dirty bit will be set again
20921ccde1cbSbellard        when accessing the range */
2093b2e0a138SMichael S. Tsirkin     start1 = (unsigned long)qemu_safe_ram_ptr(start);
2094a57d23e4SStefan Weil     /* Check that we don't span multiple blocks - doing so would break
20955579c7f3Spbrook        the address comparisons below.  */
2096b2e0a138SMichael S. Tsirkin     if ((unsigned long)qemu_safe_ram_ptr(end - 1) - start1
20975579c7f3Spbrook             != (end - 1) - start) {
20985579c7f3Spbrook         abort();
20995579c7f3Spbrook     }
21005579c7f3Spbrook 
21016a00d601Sbellard     for(env = first_cpu; env != NULL; env = env->next_cpu) {
2102cfde4bd9SIsaku Yamahata         int mmu_idx;
2103cfde4bd9SIsaku Yamahata         for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) {
21041ccde1cbSbellard             for(i = 0; i < CPU_TLB_SIZE; i++)
2105cfde4bd9SIsaku Yamahata                 tlb_reset_dirty_range(&env->tlb_table[mmu_idx][i],
2106cfde4bd9SIsaku Yamahata                                       start1, length);
2107cfde4bd9SIsaku Yamahata         }
21086a00d601Sbellard     }
21091ccde1cbSbellard }
21101ccde1cbSbellard 
211174576198Saliguori int cpu_physical_memory_set_dirty_tracking(int enable)
211274576198Saliguori {
2113f6f3fbcaSMichael S. Tsirkin     int ret = 0;
211474576198Saliguori     in_migration = enable;
2115f6f3fbcaSMichael S. Tsirkin     ret = cpu_notify_migration_log(!!enable);
2116f6f3fbcaSMichael S. Tsirkin     return ret;
211774576198Saliguori }
211874576198Saliguori 
211974576198Saliguori int cpu_physical_memory_get_dirty_tracking(void)
212074576198Saliguori {
212174576198Saliguori     return in_migration;
212274576198Saliguori }
212374576198Saliguori 
2124c227f099SAnthony Liguori int cpu_physical_sync_dirty_bitmap(target_phys_addr_t start_addr,
2125c227f099SAnthony Liguori                                    target_phys_addr_t end_addr)
21262bec46dcSaliguori {
21277b8f3b78SMichael S. Tsirkin     int ret;
2128151f7749SJan Kiszka 
2129f6f3fbcaSMichael S. Tsirkin     ret = cpu_notify_sync_dirty_bitmap(start_addr, end_addr);
2130151f7749SJan Kiszka     return ret;
21312bec46dcSaliguori }
21322bec46dcSaliguori 
2133e5896b12SAnthony PERARD int cpu_physical_log_start(target_phys_addr_t start_addr,
2134e5896b12SAnthony PERARD                            ram_addr_t size)
2135e5896b12SAnthony PERARD {
2136e5896b12SAnthony PERARD     CPUPhysMemoryClient *client;
2137e5896b12SAnthony PERARD     QLIST_FOREACH(client, &memory_client_list, list) {
2138e5896b12SAnthony PERARD         if (client->log_start) {
2139e5896b12SAnthony PERARD             int r = client->log_start(client, start_addr, size);
2140e5896b12SAnthony PERARD             if (r < 0) {
2141e5896b12SAnthony PERARD                 return r;
2142e5896b12SAnthony PERARD             }
2143e5896b12SAnthony PERARD         }
2144e5896b12SAnthony PERARD     }
2145e5896b12SAnthony PERARD     return 0;
2146e5896b12SAnthony PERARD }
2147e5896b12SAnthony PERARD 
2148e5896b12SAnthony PERARD int cpu_physical_log_stop(target_phys_addr_t start_addr,
2149e5896b12SAnthony PERARD                           ram_addr_t size)
2150e5896b12SAnthony PERARD {
2151e5896b12SAnthony PERARD     CPUPhysMemoryClient *client;
2152e5896b12SAnthony PERARD     QLIST_FOREACH(client, &memory_client_list, list) {
2153e5896b12SAnthony PERARD         if (client->log_stop) {
2154e5896b12SAnthony PERARD             int r = client->log_stop(client, start_addr, size);
2155e5896b12SAnthony PERARD             if (r < 0) {
2156e5896b12SAnthony PERARD                 return r;
2157e5896b12SAnthony PERARD             }
2158e5896b12SAnthony PERARD         }
2159e5896b12SAnthony PERARD     }
2160e5896b12SAnthony PERARD     return 0;
2161e5896b12SAnthony PERARD }
2162e5896b12SAnthony PERARD 
21633a7d929eSbellard static inline void tlb_update_dirty(CPUTLBEntry *tlb_entry)
21643a7d929eSbellard {
2165c227f099SAnthony Liguori     ram_addr_t ram_addr;
21665579c7f3Spbrook     void *p;
21673a7d929eSbellard 
216884b7b8e7Sbellard     if ((tlb_entry->addr_write & ~TARGET_PAGE_MASK) == IO_MEM_RAM) {
21695579c7f3Spbrook         p = (void *)(unsigned long)((tlb_entry->addr_write & TARGET_PAGE_MASK)
21705579c7f3Spbrook             + tlb_entry->addend);
2171e890261fSMarcelo Tosatti         ram_addr = qemu_ram_addr_from_host_nofail(p);
21723a7d929eSbellard         if (!cpu_physical_memory_is_dirty(ram_addr)) {
21730f459d16Spbrook             tlb_entry->addr_write |= TLB_NOTDIRTY;
21743a7d929eSbellard         }
21753a7d929eSbellard     }
21763a7d929eSbellard }
21773a7d929eSbellard 
21783a7d929eSbellard /* update the TLB according to the current state of the dirty bits */
21793a7d929eSbellard void cpu_tlb_update_dirty(CPUState *env)
21803a7d929eSbellard {
21813a7d929eSbellard     int i;
2182cfde4bd9SIsaku Yamahata     int mmu_idx;
2183cfde4bd9SIsaku Yamahata     for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) {
21843a7d929eSbellard         for(i = 0; i < CPU_TLB_SIZE; i++)
2185cfde4bd9SIsaku Yamahata             tlb_update_dirty(&env->tlb_table[mmu_idx][i]);
2186cfde4bd9SIsaku Yamahata     }
21873a7d929eSbellard }
21883a7d929eSbellard 
21890f459d16Spbrook static inline void tlb_set_dirty1(CPUTLBEntry *tlb_entry, target_ulong vaddr)
21901ccde1cbSbellard {
21910f459d16Spbrook     if (tlb_entry->addr_write == (vaddr | TLB_NOTDIRTY))
21920f459d16Spbrook         tlb_entry->addr_write = vaddr;
21931ccde1cbSbellard }
21941ccde1cbSbellard 
21950f459d16Spbrook /* update the TLB corresponding to virtual page vaddr
21960f459d16Spbrook    so that it is no longer dirty */
21970f459d16Spbrook static inline void tlb_set_dirty(CPUState *env, target_ulong vaddr)
21981ccde1cbSbellard {
21991ccde1cbSbellard     int i;
2200cfde4bd9SIsaku Yamahata     int mmu_idx;
22011ccde1cbSbellard 
22020f459d16Spbrook     vaddr &= TARGET_PAGE_MASK;
22031ccde1cbSbellard     i = (vaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
2204cfde4bd9SIsaku Yamahata     for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++)
2205cfde4bd9SIsaku Yamahata         tlb_set_dirty1(&env->tlb_table[mmu_idx][i], vaddr);
22061ccde1cbSbellard }
22071ccde1cbSbellard 
2208d4c430a8SPaul Brook /* Our TLB does not support large pages, so remember the area covered by
2209d4c430a8SPaul Brook    large pages and trigger a full TLB flush if these are invalidated.  */
2210d4c430a8SPaul Brook static void tlb_add_large_page(CPUState *env, target_ulong vaddr,
2211d4c430a8SPaul Brook                                target_ulong size)
2212d4c430a8SPaul Brook {
2213d4c430a8SPaul Brook     target_ulong mask = ~(size - 1);
2214d4c430a8SPaul Brook 
2215d4c430a8SPaul Brook     if (env->tlb_flush_addr == (target_ulong)-1) {
2216d4c430a8SPaul Brook         env->tlb_flush_addr = vaddr & mask;
2217d4c430a8SPaul Brook         env->tlb_flush_mask = mask;
2218d4c430a8SPaul Brook         return;
2219d4c430a8SPaul Brook     }
2220d4c430a8SPaul Brook     /* Extend the existing region to include the new page.
2221d4c430a8SPaul Brook        This is a compromise between unnecessary flushes and the cost
2222d4c430a8SPaul Brook        of maintaining a full variable size TLB.  */
2223d4c430a8SPaul Brook     mask &= env->tlb_flush_mask;
2224d4c430a8SPaul Brook     while (((env->tlb_flush_addr ^ vaddr) & mask) != 0) {
2225d4c430a8SPaul Brook         mask <<= 1;
2226d4c430a8SPaul Brook     }
2227d4c430a8SPaul Brook     env->tlb_flush_addr &= mask;
2228d4c430a8SPaul Brook     env->tlb_flush_mask = mask;
2229d4c430a8SPaul Brook }
2230d4c430a8SPaul Brook 
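/* Editorial sketch (standalone, compile separately) of the widening
 * step in tlb_add_large_page() above: shift the mask left until the
 * existing region base and the new page agree under it, then re-align
 * the base.  The addresses and 2 MiB page size are made up. */
#include <assert.h>
#include <stdint.h>

static void demo_extend_flush_region(void)
{
    uint64_t flush_addr = 0x40000000;           /* existing region base */
    uint64_t flush_mask = ~(uint64_t)0x1fffff;  /* covers one 2 MiB page */
    uint64_t vaddr      = 0x40380000;           /* new large page */
    uint64_t mask       = ~(uint64_t)0x1fffff;

    /* same widening loop as the function above */
    mask &= flush_mask;
    while (((flush_addr ^ vaddr) & mask) != 0) {
        mask <<= 1;
    }
    flush_addr &= mask;
    /* one doubling suffices here: a 4 MiB region covers both pages */
    assert(mask == ~(uint64_t)0x3fffff);
    assert((flush_addr & mask) == (vaddr & mask));
}
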
2231d4c430a8SPaul Brook /* Add a new TLB entry. At most one entry for a given virtual address
2232d4c430a8SPaul Brook    is permitted. Only a single TARGET_PAGE_SIZE region is mapped; the
2233d4c430a8SPaul Brook    supplied size is only used by tlb_flush_page.  */
2234d4c430a8SPaul Brook void tlb_set_page(CPUState *env, target_ulong vaddr,
2235c227f099SAnthony Liguori                   target_phys_addr_t paddr, int prot,
2236d4c430a8SPaul Brook                   int mmu_idx, target_ulong size)
22379fa3e853Sbellard {
223892e873b9Sbellard     PhysPageDesc *p;
22394f2ac237Sbellard     unsigned long pd;
22409fa3e853Sbellard     unsigned int index;
22414f2ac237Sbellard     target_ulong address;
22420f459d16Spbrook     target_ulong code_address;
2243355b1943SPaul Brook     unsigned long addend;
224484b7b8e7Sbellard     CPUTLBEntry *te;
2245a1d1bb31Saliguori     CPUWatchpoint *wp;
2246c227f099SAnthony Liguori     target_phys_addr_t iotlb;
22479fa3e853Sbellard 
2248d4c430a8SPaul Brook     assert(size >= TARGET_PAGE_SIZE);
2249d4c430a8SPaul Brook     if (size != TARGET_PAGE_SIZE) {
2250d4c430a8SPaul Brook         tlb_add_large_page(env, vaddr, size);
2251d4c430a8SPaul Brook     }
225292e873b9Sbellard     p = phys_page_find(paddr >> TARGET_PAGE_BITS);
22539fa3e853Sbellard     if (!p) {
22549fa3e853Sbellard         pd = IO_MEM_UNASSIGNED;
22559fa3e853Sbellard     } else {
22569fa3e853Sbellard         pd = p->phys_offset;
22579fa3e853Sbellard     }
22589fa3e853Sbellard #if defined(DEBUG_TLB)
22597fd3f494SStefan Weil     printf("tlb_set_page: vaddr=" TARGET_FMT_lx " paddr=0x" TARGET_FMT_plx
22607fd3f494SStefan Weil            " prot=%x idx=%d pd=0x%08lx\n",
22617fd3f494SStefan Weil            vaddr, paddr, prot, mmu_idx, pd);
22629fa3e853Sbellard #endif
22639fa3e853Sbellard 
22649fa3e853Sbellard     address = vaddr;
22650f459d16Spbrook     if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM && !(pd & IO_MEM_ROMD)) {
22660f459d16Spbrook         /* IO memory case (romd handled later) */
22670f459d16Spbrook         address |= TLB_MMIO;
22680f459d16Spbrook     }
22695579c7f3Spbrook     addend = (unsigned long)qemu_get_ram_ptr(pd & TARGET_PAGE_MASK);
22700f459d16Spbrook     if ((pd & ~TARGET_PAGE_MASK) <= IO_MEM_ROM) {
22710f459d16Spbrook         /* Normal RAM.  */
22720f459d16Spbrook         iotlb = pd & TARGET_PAGE_MASK;
22730f459d16Spbrook         if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_RAM)
22740f459d16Spbrook             iotlb |= IO_MEM_NOTDIRTY;
22750f459d16Spbrook         else
22760f459d16Spbrook             iotlb |= IO_MEM_ROM;
22770f459d16Spbrook     } else {
2278ccbb4d44SStuart Brady         /* IO handlers are currently passed a physical address.
22790f459d16Spbrook            It would be nice to pass an offset from the base address
22800f459d16Spbrook            of that region.  This would avoid having to special case RAM,
22810f459d16Spbrook            and avoid full address decoding in every device.
22820f459d16Spbrook            We can't use the high bits of pd for this because
22830f459d16Spbrook            IO_MEM_ROMD uses these as a ram address.  */
22848da3ff18Spbrook         iotlb = (pd & ~TARGET_PAGE_MASK);
22858da3ff18Spbrook         if (p) {
22868da3ff18Spbrook             iotlb += p->region_offset;
22878da3ff18Spbrook         } else {
22888da3ff18Spbrook             iotlb += paddr;
22898da3ff18Spbrook         }
22909fa3e853Sbellard     }
22919fa3e853Sbellard 
22920f459d16Spbrook     code_address = address;
22936658ffb8Spbrook     /* Make accesses to pages with watchpoints go via the
22946658ffb8Spbrook        watchpoint trap routines.  */
229572cf2d4fSBlue Swirl     QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
2296a1d1bb31Saliguori         if (vaddr == (wp->vaddr & TARGET_PAGE_MASK)) {
2297bf298f83SJun Koi             /* Avoid trapping reads of pages with a write breakpoint. */
2298bf298f83SJun Koi             if ((prot & PAGE_WRITE) || (wp->flags & BP_MEM_READ)) {
22990f459d16Spbrook                 iotlb = io_mem_watch + paddr;
23000f459d16Spbrook                 address |= TLB_MMIO;
2301bf298f83SJun Koi                 break;
2302bf298f83SJun Koi             }
23036658ffb8Spbrook         }
23046658ffb8Spbrook     }
23056658ffb8Spbrook 
230690f18422Sbellard     index = (vaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
23070f459d16Spbrook     env->iotlb[mmu_idx][index] = iotlb - vaddr;
23086ebbf390Sj_mayer     te = &env->tlb_table[mmu_idx][index];
23090f459d16Spbrook     te->addend = addend - vaddr;
231067b915a5Sbellard     if (prot & PAGE_READ) {
231184b7b8e7Sbellard         te->addr_read = address;
23129fa3e853Sbellard     } else {
231384b7b8e7Sbellard         te->addr_read = -1;
231484b7b8e7Sbellard     }
23155c751e99Sedgar_igl 
231684b7b8e7Sbellard     if (prot & PAGE_EXEC) {
23170f459d16Spbrook         te->addr_code = code_address;
231884b7b8e7Sbellard     } else {
231984b7b8e7Sbellard         te->addr_code = -1;
23209fa3e853Sbellard     }
232167b915a5Sbellard     if (prot & PAGE_WRITE) {
2322856074ecSbellard         if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_ROM ||
2323856074ecSbellard             (pd & IO_MEM_ROMD)) {
23240f459d16Spbrook             /* Write access calls the I/O callback.  */
23250f459d16Spbrook             te->addr_write = address | TLB_MMIO;
23263a7d929eSbellard         } else if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_RAM &&
23271ccde1cbSbellard                    !cpu_physical_memory_is_dirty(pd)) {
23280f459d16Spbrook             te->addr_write = address | TLB_NOTDIRTY;
23299fa3e853Sbellard         } else {
233084b7b8e7Sbellard             te->addr_write = address;
23319fa3e853Sbellard         }
23329fa3e853Sbellard     } else {
233384b7b8e7Sbellard         te->addr_write = -1;
23349fa3e853Sbellard     }
23359fa3e853Sbellard }
23369fa3e853Sbellard 
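/* Editorial sketch (standalone, compile separately): the direct-mapped
 * indexing used by tlb_set_page() above.  The TLB size and page bits
 * are assumed values, not this build's real CPU_TLB_SIZE and
 * TARGET_PAGE_BITS. */
#include <assert.h>
#include <stdint.h>

enum { DEMO_TLB_SIZE = 256, DEMO_PAGE_BITS = 12 };

static unsigned demo_tlb_index(uint64_t vaddr)
{
    return (vaddr >> DEMO_PAGE_BITS) & (DEMO_TLB_SIZE - 1);
}

static void demo_index(void)
{
    /* pages one TLB stride apart alias to the same slot, so the old
       entry is simply overwritten: at most one entry per address */
    uint64_t stride = (uint64_t)DEMO_TLB_SIZE << DEMO_PAGE_BITS;
    assert(demo_tlb_index(0x1000) == demo_tlb_index(0x1000 + stride));
    /* te->addend above is chosen so host == guest_vaddr + addend */
}
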
23370124311eSbellard #else
23380124311eSbellard 
2339ee8b7021Sbellard void tlb_flush(CPUState *env, int flush_global)
23400124311eSbellard {
23410124311eSbellard }
23420124311eSbellard 
23432e12669aSbellard void tlb_flush_page(CPUState *env, target_ulong addr)
23440124311eSbellard {
23450124311eSbellard }
23460124311eSbellard 
2347edf8e2afSMika Westerberg /*
2348edf8e2afSMika Westerberg  * Walks guest process memory "regions" one by one
2349edf8e2afSMika Westerberg  * and calls the callback function 'fn' for each region.
2350edf8e2afSMika Westerberg  */
23515cd2c5b6SRichard Henderson 
23525cd2c5b6SRichard Henderson struct walk_memory_regions_data
235333417e70Sbellard {
23545cd2c5b6SRichard Henderson     walk_memory_regions_fn fn;
23555cd2c5b6SRichard Henderson     void *priv;
23565cd2c5b6SRichard Henderson     unsigned long start;
23575cd2c5b6SRichard Henderson     int prot;
23585cd2c5b6SRichard Henderson };
23599fa3e853Sbellard 
23605cd2c5b6SRichard Henderson static int walk_memory_regions_end(struct walk_memory_regions_data *data,
2361b480d9b7SPaul Brook                                    abi_ulong end, int new_prot)
23625cd2c5b6SRichard Henderson {
23635cd2c5b6SRichard Henderson     if (data->start != -1ul) {
23645cd2c5b6SRichard Henderson         int rc = data->fn(data->priv, data->start, end, data->prot);
23655cd2c5b6SRichard Henderson         if (rc != 0) {
23665cd2c5b6SRichard Henderson             return rc;
23675cd2c5b6SRichard Henderson         }
23685cd2c5b6SRichard Henderson     }
2369edf8e2afSMika Westerberg 
23705cd2c5b6SRichard Henderson     data->start = (new_prot ? end : -1ul);
23715cd2c5b6SRichard Henderson     data->prot = new_prot;
23725cd2c5b6SRichard Henderson 
23735cd2c5b6SRichard Henderson     return 0;
237433417e70Sbellard }
23755cd2c5b6SRichard Henderson 
23765cd2c5b6SRichard Henderson static int walk_memory_regions_1(struct walk_memory_regions_data *data,
2377b480d9b7SPaul Brook                                  abi_ulong base, int level, void **lp)
23785cd2c5b6SRichard Henderson {
2379b480d9b7SPaul Brook     abi_ulong pa;
23805cd2c5b6SRichard Henderson     int i, rc;
23815cd2c5b6SRichard Henderson 
23825cd2c5b6SRichard Henderson     if (*lp == NULL) {
23835cd2c5b6SRichard Henderson         return walk_memory_regions_end(data, base, 0);
23849fa3e853Sbellard     }
23855cd2c5b6SRichard Henderson 
23865cd2c5b6SRichard Henderson     if (level == 0) {
23875cd2c5b6SRichard Henderson         PageDesc *pd = *lp;
23887296abacSPaul Brook         for (i = 0; i < L2_SIZE; ++i) {
23895cd2c5b6SRichard Henderson             int prot = pd[i].flags;
23905cd2c5b6SRichard Henderson 
23915cd2c5b6SRichard Henderson             pa = base | (i << TARGET_PAGE_BITS);
23925cd2c5b6SRichard Henderson             if (prot != data->prot) {
23935cd2c5b6SRichard Henderson                 rc = walk_memory_regions_end(data, pa, prot);
23945cd2c5b6SRichard Henderson                 if (rc != 0) {
23955cd2c5b6SRichard Henderson                     return rc;
23969fa3e853Sbellard                 }
23979fa3e853Sbellard             }
23985cd2c5b6SRichard Henderson         }
23995cd2c5b6SRichard Henderson     } else {
24005cd2c5b6SRichard Henderson         void **pp = *lp;
24017296abacSPaul Brook         for (i = 0; i < L2_SIZE; ++i) {
2402b480d9b7SPaul Brook             pa = base | ((abi_ulong)i <<
2403b480d9b7SPaul Brook                 (TARGET_PAGE_BITS + L2_BITS * level));
24045cd2c5b6SRichard Henderson             rc = walk_memory_regions_1(data, pa, level - 1, pp + i);
24055cd2c5b6SRichard Henderson             if (rc != 0) {
24065cd2c5b6SRichard Henderson                 return rc;
24075cd2c5b6SRichard Henderson             }
24085cd2c5b6SRichard Henderson         }
24095cd2c5b6SRichard Henderson     }
24105cd2c5b6SRichard Henderson 
24115cd2c5b6SRichard Henderson     return 0;
24125cd2c5b6SRichard Henderson }
24135cd2c5b6SRichard Henderson 
24145cd2c5b6SRichard Henderson int walk_memory_regions(void *priv, walk_memory_regions_fn fn)
24155cd2c5b6SRichard Henderson {
24165cd2c5b6SRichard Henderson     struct walk_memory_regions_data data;
24175cd2c5b6SRichard Henderson     unsigned long i;
24185cd2c5b6SRichard Henderson 
24195cd2c5b6SRichard Henderson     data.fn = fn;
24205cd2c5b6SRichard Henderson     data.priv = priv;
24215cd2c5b6SRichard Henderson     data.start = -1ul;
24225cd2c5b6SRichard Henderson     data.prot = 0;
24235cd2c5b6SRichard Henderson 
24245cd2c5b6SRichard Henderson     for (i = 0; i < V_L1_SIZE; i++) {
2425b480d9b7SPaul Brook         int rc = walk_memory_regions_1(&data, (abi_ulong)i << V_L1_SHIFT,
24265cd2c5b6SRichard Henderson                                        V_L1_SHIFT / L2_BITS - 1, l1_map + i);
24275cd2c5b6SRichard Henderson         if (rc != 0) {
24285cd2c5b6SRichard Henderson             return rc;
24295cd2c5b6SRichard Henderson         }
24305cd2c5b6SRichard Henderson     }
24315cd2c5b6SRichard Henderson 
24325cd2c5b6SRichard Henderson     return walk_memory_regions_end(&data, 0, 0);
2433edf8e2afSMika Westerberg }
2434edf8e2afSMika Westerberg 
2435b480d9b7SPaul Brook static int dump_region(void *priv, abi_ulong start,
2436b480d9b7SPaul Brook     abi_ulong end, unsigned long prot)
2437edf8e2afSMika Westerberg {
2438edf8e2afSMika Westerberg     FILE *f = (FILE *)priv;
2439edf8e2afSMika Westerberg 
2440b480d9b7SPaul Brook     (void) fprintf(f, TARGET_ABI_FMT_lx"-"TARGET_ABI_FMT_lx
2441b480d9b7SPaul Brook         " "TARGET_ABI_FMT_lx" %c%c%c\n",
2442edf8e2afSMika Westerberg         start, end, end - start,
2443edf8e2afSMika Westerberg         ((prot & PAGE_READ) ? 'r' : '-'),
2444edf8e2afSMika Westerberg         ((prot & PAGE_WRITE) ? 'w' : '-'),
2445edf8e2afSMika Westerberg         ((prot & PAGE_EXEC) ? 'x' : '-'));
2446edf8e2afSMika Westerberg 
2447edf8e2afSMika Westerberg     return (0);
2448edf8e2afSMika Westerberg }
2449edf8e2afSMika Westerberg 
2450edf8e2afSMika Westerberg /* dump memory mappings */
2451edf8e2afSMika Westerberg void page_dump(FILE *f)
2452edf8e2afSMika Westerberg {
2453edf8e2afSMika Westerberg     (void) fprintf(f, "%-8s %-8s %-8s %s\n",
2454edf8e2afSMika Westerberg             "start", "end", "size", "prot");
2455edf8e2afSMika Westerberg     walk_memory_regions(f, dump_region);
24569fa3e853Sbellard }
24579fa3e853Sbellard 
245853a5960aSpbrook int page_get_flags(target_ulong address)
24599fa3e853Sbellard {
24609fa3e853Sbellard     PageDesc *p;
24619fa3e853Sbellard 
24629fa3e853Sbellard     p = page_find(address >> TARGET_PAGE_BITS);
24639fa3e853Sbellard     if (!p)
24649fa3e853Sbellard         return 0;
24659fa3e853Sbellard     return p->flags;
24669fa3e853Sbellard }
24679fa3e853Sbellard 
2468376a7909SRichard Henderson /* Modify the flags of a page and invalidate the code if necessary.
2469376a7909SRichard Henderson    The flag PAGE_WRITE_ORG is positioned automatically depending
2470376a7909SRichard Henderson    on PAGE_WRITE.  The mmap_lock should already be held.  */
247153a5960aSpbrook void page_set_flags(target_ulong start, target_ulong end, int flags)
24729fa3e853Sbellard {
2473376a7909SRichard Henderson     target_ulong addr, len;
24749fa3e853Sbellard 
2475376a7909SRichard Henderson     /* This function should never be called with addresses outside the
2476376a7909SRichard Henderson        guest address space.  If this assert fires, it probably indicates
2477376a7909SRichard Henderson        a missing call to h2g_valid.  */
2478b480d9b7SPaul Brook #if TARGET_ABI_BITS > L1_MAP_ADDR_SPACE_BITS
2479b480d9b7SPaul Brook     assert(end < ((abi_ulong)1 << L1_MAP_ADDR_SPACE_BITS));
2480376a7909SRichard Henderson #endif
2481376a7909SRichard Henderson     assert(start < end);
2482376a7909SRichard Henderson 
24839fa3e853Sbellard     start = start & TARGET_PAGE_MASK;
24849fa3e853Sbellard     end = TARGET_PAGE_ALIGN(end);
2485376a7909SRichard Henderson 
2486376a7909SRichard Henderson     if (flags & PAGE_WRITE) {
24879fa3e853Sbellard         flags |= PAGE_WRITE_ORG;
2488376a7909SRichard Henderson     }
2489376a7909SRichard Henderson 
2490376a7909SRichard Henderson     for (addr = start, len = end - start;
2491376a7909SRichard Henderson          len != 0;
2492376a7909SRichard Henderson          len -= TARGET_PAGE_SIZE, addr += TARGET_PAGE_SIZE) {
2493376a7909SRichard Henderson         PageDesc *p = page_find_alloc(addr >> TARGET_PAGE_BITS, 1);
2494376a7909SRichard Henderson 
2495376a7909SRichard Henderson         /* If the write protection bit is set, then we invalidate
2496376a7909SRichard Henderson            the code inside.  */
24979fa3e853Sbellard         if (!(p->flags & PAGE_WRITE) &&
24989fa3e853Sbellard             (flags & PAGE_WRITE) &&
24999fa3e853Sbellard             p->first_tb) {
2500d720b93dSbellard             tb_invalidate_phys_page(addr, 0, NULL);
25019fa3e853Sbellard         }
25029fa3e853Sbellard         p->flags = flags;
25039fa3e853Sbellard     }
25049fa3e853Sbellard }
25059fa3e853Sbellard 
25063d97b40bSths int page_check_range(target_ulong start, target_ulong len, int flags)
25073d97b40bSths {
25083d97b40bSths     PageDesc *p;
25093d97b40bSths     target_ulong end;
25103d97b40bSths     target_ulong addr;
25113d97b40bSths 
2512376a7909SRichard Henderson     /* This function should never be called with addresses outside the
2513376a7909SRichard Henderson        guest address space.  If this assert fires, it probably indicates
2514376a7909SRichard Henderson        a missing call to h2g_valid.  */
2515338e9e6cSBlue Swirl #if TARGET_ABI_BITS > L1_MAP_ADDR_SPACE_BITS
2516338e9e6cSBlue Swirl     assert(start < ((abi_ulong)1 << L1_MAP_ADDR_SPACE_BITS));
2517376a7909SRichard Henderson #endif
2518376a7909SRichard Henderson 
25193e0650a9SRichard Henderson     if (len == 0) {
25203e0650a9SRichard Henderson         return 0;
25213e0650a9SRichard Henderson     }
2522376a7909SRichard Henderson     if (start + len - 1 < start) {
2523376a7909SRichard Henderson         /* We've wrapped around.  */
252455f280c9Sbalrog         return -1;
2525376a7909SRichard Henderson     }
252655f280c9Sbalrog 
25273d97b40bSths     end = TARGET_PAGE_ALIGN(start+len); /* must be done before we lose bits in the next step */
25283d97b40bSths     start = start & TARGET_PAGE_MASK;
25293d97b40bSths 
2530376a7909SRichard Henderson     for (addr = start, len = end - start;
2531376a7909SRichard Henderson          len != 0;
2532376a7909SRichard Henderson          len -= TARGET_PAGE_SIZE, addr += TARGET_PAGE_SIZE) {
25333d97b40bSths         p = page_find(addr >> TARGET_PAGE_BITS);
25343d97b40bSths         if( !p )
25353d97b40bSths         if (!p)
25363d97b40bSths             return -1;
25373d97b40bSths         if (!(p->flags & PAGE_VALID))
25383d97b40bSths 
2539dae3270cSbellard         if ((flags & PAGE_READ) && !(p->flags & PAGE_READ))
25403d97b40bSths             return -1;
2541dae3270cSbellard         if (flags & PAGE_WRITE) {
2542dae3270cSbellard             if (!(p->flags & PAGE_WRITE_ORG))
25433d97b40bSths                 return -1;
2544dae3270cSbellard             /* unprotect the page if it was put read-only because it
2545dae3270cSbellard                contains translated code */
2546dae3270cSbellard             if (!(p->flags & PAGE_WRITE)) {
2547dae3270cSbellard                 if (!page_unprotect(addr, 0, NULL))
2548dae3270cSbellard                     return -1;
2549dae3270cSbellard             }
2550dae3270cSbellard             return 0;
2551dae3270cSbellard         }
25523d97b40bSths     }
25533d97b40bSths     return 0;
25543d97b40bSths }
25553d97b40bSths 
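/* Editorial note on the wrap check in page_check_range() above: with
 * unsigned arithmetic, start + len - 1 < start detects that the range
 * wrapped past the top of the address space.  Standalone sketch with
 * assumed 32-bit target ulongs: */
#include <assert.h>
#include <stdint.h>

static int demo_range_wraps(uint32_t start, uint32_t len)
{
    return len != 0 && start + len - 1 < start;
}

static void demo_wrap(void)
{
    assert(!demo_range_wraps(0xfffff000u, 0x1000));  /* ends at the top */
    assert(demo_range_wraps(0xfffff000u, 0x1001));   /* wraps around */
}
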
25569fa3e853Sbellard /* called from signal handler: invalidate the code and unprotect the
2557ccbb4d44SStuart Brady    page. Return TRUE if the fault was successfully handled. */
255853a5960aSpbrook int page_unprotect(target_ulong address, unsigned long pc, void *puc)
25599fa3e853Sbellard {
256045d679d6SAurelien Jarno     unsigned int prot;
256145d679d6SAurelien Jarno     PageDesc *p;
256253a5960aSpbrook     target_ulong host_start, host_end, addr;
25639fa3e853Sbellard 
2564c8a706feSpbrook     /* Technically this isn't safe inside a signal handler.  However we
2565c8a706feSpbrook        know this only ever happens in a synchronous SEGV handler, so in
2566c8a706feSpbrook        practice it seems to be ok.  */
2567c8a706feSpbrook     mmap_lock();
2568c8a706feSpbrook 
256945d679d6SAurelien Jarno     p = page_find(address >> TARGET_PAGE_BITS);
257045d679d6SAurelien Jarno     if (!p) {
2571c8a706feSpbrook         mmap_unlock();
25729fa3e853Sbellard         return 0;
2573c8a706feSpbrook     }
257445d679d6SAurelien Jarno 
25759fa3e853Sbellard     /* if the page was really writable, then we change its
25769fa3e853Sbellard        protection back to writable */
257745d679d6SAurelien Jarno     if ((p->flags & PAGE_WRITE_ORG) && !(p->flags & PAGE_WRITE)) {
257845d679d6SAurelien Jarno         host_start = address & qemu_host_page_mask;
257945d679d6SAurelien Jarno         host_end = host_start + qemu_host_page_size;
258045d679d6SAurelien Jarno 
258145d679d6SAurelien Jarno         prot = 0;
258245d679d6SAurelien Jarno         for (addr = host_start ; addr < host_end ; addr += TARGET_PAGE_SIZE) {
258345d679d6SAurelien Jarno             p = page_find(addr >> TARGET_PAGE_BITS);
258445d679d6SAurelien Jarno             p->flags |= PAGE_WRITE;
258545d679d6SAurelien Jarno             prot |= p->flags;
258645d679d6SAurelien Jarno 
25879fa3e853Sbellard             /* and since the content will be modified, we must invalidate
25889fa3e853Sbellard                the corresponding translated code. */
258945d679d6SAurelien Jarno             tb_invalidate_phys_page(addr, pc, puc);
25909fa3e853Sbellard #ifdef DEBUG_TB_CHECK
259145d679d6SAurelien Jarno             tb_invalidate_check(addr);
25929fa3e853Sbellard #endif
259345d679d6SAurelien Jarno         }
259445d679d6SAurelien Jarno         mprotect((void *)g2h(host_start), qemu_host_page_size,
259545d679d6SAurelien Jarno                  prot & PAGE_BITS);
259645d679d6SAurelien Jarno 
2597c8a706feSpbrook         mmap_unlock();
25989fa3e853Sbellard         return 1;
25999fa3e853Sbellard     }
2600c8a706feSpbrook     mmap_unlock();
26019fa3e853Sbellard     return 0;
26029fa3e853Sbellard }
26039fa3e853Sbellard 
26046a00d601Sbellard static inline void tlb_set_dirty(CPUState *env,
26056a00d601Sbellard                                  unsigned long addr, target_ulong vaddr)
26061ccde1cbSbellard {
26071ccde1cbSbellard }
26089fa3e853Sbellard #endif /* defined(CONFIG_USER_ONLY) */
260933417e70Sbellard 
2610e2eef170Spbrook #if !defined(CONFIG_USER_ONLY)
26118da3ff18Spbrook 
2612c04b2b78SPaul Brook #define SUBPAGE_IDX(addr) ((addr) & ~TARGET_PAGE_MASK)
2613c04b2b78SPaul Brook typedef struct subpage_t {
2614c04b2b78SPaul Brook     target_phys_addr_t base;
2615f6405247SRichard Henderson     ram_addr_t sub_io_index[TARGET_PAGE_SIZE];
2616f6405247SRichard Henderson     ram_addr_t region_offset[TARGET_PAGE_SIZE];
2617c04b2b78SPaul Brook } subpage_t;
2618c04b2b78SPaul Brook 
2619c227f099SAnthony Liguori static int subpage_register (subpage_t *mmio, uint32_t start, uint32_t end,
2620c227f099SAnthony Liguori                              ram_addr_t memory, ram_addr_t region_offset);
2621f6405247SRichard Henderson static subpage_t *subpage_init (target_phys_addr_t base, ram_addr_t *phys,
2622f6405247SRichard Henderson                                 ram_addr_t orig_memory,
2623f6405247SRichard Henderson                                 ram_addr_t region_offset);
2624db7b5426Sblueswir1 #define CHECK_SUBPAGE(addr, start_addr, start_addr2, end_addr, end_addr2, \
2625db7b5426Sblueswir1                       need_subpage)                                     \
2626db7b5426Sblueswir1     do {                                                                \
2627db7b5426Sblueswir1         if (addr > start_addr)                                          \
2628db7b5426Sblueswir1             start_addr2 = 0;                                            \
2629db7b5426Sblueswir1         else {                                                          \
2630db7b5426Sblueswir1             start_addr2 = start_addr & ~TARGET_PAGE_MASK;               \
2631db7b5426Sblueswir1             if (start_addr2 > 0)                                        \
2632db7b5426Sblueswir1                 need_subpage = 1;                                       \
2633db7b5426Sblueswir1         }                                                               \
2634db7b5426Sblueswir1                                                                         \
263549e9fba2Sblueswir1         if ((start_addr + orig_size) - addr >= TARGET_PAGE_SIZE)        \
2636db7b5426Sblueswir1             end_addr2 = TARGET_PAGE_SIZE - 1;                           \
2637db7b5426Sblueswir1         else {                                                          \
2638db7b5426Sblueswir1             end_addr2 = (start_addr + orig_size - 1) & ~TARGET_PAGE_MASK; \
2639db7b5426Sblueswir1             if (end_addr2 < TARGET_PAGE_SIZE - 1)                       \
2640db7b5426Sblueswir1                 need_subpage = 1;                                       \
2641db7b5426Sblueswir1         }                                                               \
2642db7b5426Sblueswir1     } while (0)
2643db7b5426Sblueswir1 
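/* Editorial worked example for CHECK_SUBPAGE above (standalone,
 * assumed 4 KiB pages): registering [0x1080, 0x1f00) against the page
 * at 0x1000 leaves both a head and a tail of the page uncovered, so
 * need_subpage is set and the subpage spans offsets [0x080, 0xeff]. */
#include <assert.h>
#include <stdint.h>

static void demo_check_subpage(void)
{
    const uint32_t page_size = 0x1000;
    uint32_t addr = 0x1000, start_addr = 0x1080, orig_size = 0xe80;
    uint32_t start_addr2, end_addr2;
    int need_subpage = 0;

    /* the macro above, unrolled for a single page */
    if (addr > start_addr) {
        start_addr2 = 0;
    } else {
        start_addr2 = start_addr & (page_size - 1);
        if (start_addr2 > 0) {
            need_subpage = 1;
        }
    }
    if ((start_addr + orig_size) - addr >= page_size) {
        end_addr2 = page_size - 1;
    } else {
        end_addr2 = (start_addr + orig_size - 1) & (page_size - 1);
        if (end_addr2 < page_size - 1) {
            need_subpage = 1;
        }
    }
    assert(start_addr2 == 0x080 && end_addr2 == 0xeff && need_subpage);
}
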
26448f2498f9SMichael S. Tsirkin /* register physical memory.
26458f2498f9SMichael S. Tsirkin    For RAM, 'size' must be a multiple of the target page size.
26468f2498f9SMichael S. Tsirkin    If (phys_offset & ~TARGET_PAGE_MASK) != 0, then it is an
26478da3ff18Spbrook    io memory page.  The address used when calling the IO function is
26488da3ff18Spbrook    the offset from the start of the region, plus region_offset.  Both
2649ccbb4d44SStuart Brady    start_addr and region_offset are rounded down to a page boundary
26508da3ff18Spbrook    before calculating this offset.  This should not be a problem unless
26518da3ff18Spbrook    the low bits of start_addr and region_offset differ.  */
26520fd542fbSMichael S. Tsirkin void cpu_register_physical_memory_log(target_phys_addr_t start_addr,
2653c227f099SAnthony Liguori                                          ram_addr_t size,
2654c227f099SAnthony Liguori                                          ram_addr_t phys_offset,
26550fd542fbSMichael S. Tsirkin                                          ram_addr_t region_offset,
26560fd542fbSMichael S. Tsirkin                                          bool log_dirty)
265733417e70Sbellard {
2658c227f099SAnthony Liguori     target_phys_addr_t addr, end_addr;
265992e873b9Sbellard     PhysPageDesc *p;
26609d42037bSbellard     CPUState *env;
2661c227f099SAnthony Liguori     ram_addr_t orig_size = size;
2662f6405247SRichard Henderson     subpage_t *subpage;
266333417e70Sbellard 
26643b8e6a2dSEdgar E. Iglesias     assert(size);
26650fd542fbSMichael S. Tsirkin     cpu_notify_set_memory(start_addr, size, phys_offset, log_dirty);
2666f6f3fbcaSMichael S. Tsirkin 
266767c4d23cSpbrook     if (phys_offset == IO_MEM_UNASSIGNED) {
266867c4d23cSpbrook         region_offset = start_addr;
266967c4d23cSpbrook     }
26708da3ff18Spbrook     region_offset &= TARGET_PAGE_MASK;
26715fd386f6Sbellard     size = (size + TARGET_PAGE_SIZE - 1) & TARGET_PAGE_MASK;
2672c227f099SAnthony Liguori     end_addr = start_addr + (target_phys_addr_t)size;
26733b8e6a2dSEdgar E. Iglesias 
26743b8e6a2dSEdgar E. Iglesias     addr = start_addr;
26753b8e6a2dSEdgar E. Iglesias     do {
2676db7b5426Sblueswir1         p = phys_page_find(addr >> TARGET_PAGE_BITS);
2677db7b5426Sblueswir1         if (p && p->phys_offset != IO_MEM_UNASSIGNED) {
2678c227f099SAnthony Liguori             ram_addr_t orig_memory = p->phys_offset;
2679c227f099SAnthony Liguori             target_phys_addr_t start_addr2, end_addr2;
2680db7b5426Sblueswir1             int need_subpage = 0;
2681db7b5426Sblueswir1 
2682db7b5426Sblueswir1             CHECK_SUBPAGE(addr, start_addr, start_addr2, end_addr, end_addr2,
2683db7b5426Sblueswir1                           need_subpage);
2684f6405247SRichard Henderson             if (need_subpage) {
2685db7b5426Sblueswir1                 if (!(orig_memory & IO_MEM_SUBPAGE)) {
2686db7b5426Sblueswir1                     subpage = subpage_init((addr & TARGET_PAGE_MASK),
26878da3ff18Spbrook                                            &p->phys_offset, orig_memory,
26888da3ff18Spbrook                                            p->region_offset);
2689db7b5426Sblueswir1                 } else {
2690db7b5426Sblueswir1                     subpage = io_mem_opaque[(orig_memory & ~TARGET_PAGE_MASK)
2691db7b5426Sblueswir1                                             >> IO_MEM_SHIFT];
2692db7b5426Sblueswir1                 }
26938da3ff18Spbrook                 subpage_register(subpage, start_addr2, end_addr2, phys_offset,
26948da3ff18Spbrook                                  region_offset);
26958da3ff18Spbrook                 p->region_offset = 0;
2696db7b5426Sblueswir1             } else {
2697db7b5426Sblueswir1                 p->phys_offset = phys_offset;
2698db7b5426Sblueswir1                 if ((phys_offset & ~TARGET_PAGE_MASK) <= IO_MEM_ROM ||
2699db7b5426Sblueswir1                     (phys_offset & IO_MEM_ROMD))
2700db7b5426Sblueswir1                     phys_offset += TARGET_PAGE_SIZE;
2701db7b5426Sblueswir1             }
2702db7b5426Sblueswir1         } else {
2703108c49b8Sbellard             p = phys_page_find_alloc(addr >> TARGET_PAGE_BITS, 1);
27049fa3e853Sbellard             p->phys_offset = phys_offset;
27058da3ff18Spbrook             p->region_offset = region_offset;
27062a4188a3Sbellard             if ((phys_offset & ~TARGET_PAGE_MASK) <= IO_MEM_ROM ||
27078da3ff18Spbrook                 (phys_offset & IO_MEM_ROMD)) {
270833417e70Sbellard                 phys_offset += TARGET_PAGE_SIZE;
27098da3ff18Spbrook             } else {
2710c227f099SAnthony Liguori                 target_phys_addr_t start_addr2, end_addr2;
2711db7b5426Sblueswir1                 int need_subpage = 0;
2712db7b5426Sblueswir1 
2713db7b5426Sblueswir1                 CHECK_SUBPAGE(addr, start_addr, start_addr2, end_addr,
2714db7b5426Sblueswir1                               end_addr2, need_subpage);
2715db7b5426Sblueswir1 
2716f6405247SRichard Henderson                 if (need_subpage) {
2717db7b5426Sblueswir1                     subpage = subpage_init((addr & TARGET_PAGE_MASK),
27188da3ff18Spbrook                                            &p->phys_offset, IO_MEM_UNASSIGNED,
271967c4d23cSpbrook                                            addr & TARGET_PAGE_MASK);
2720db7b5426Sblueswir1                     subpage_register(subpage, start_addr2, end_addr2,
27218da3ff18Spbrook                                      phys_offset, region_offset);
27228da3ff18Spbrook                     p->region_offset = 0;
2723db7b5426Sblueswir1                 }
2724db7b5426Sblueswir1             }
2725db7b5426Sblueswir1         }
27268da3ff18Spbrook         region_offset += TARGET_PAGE_SIZE;
27273b8e6a2dSEdgar E. Iglesias         addr += TARGET_PAGE_SIZE;
27283b8e6a2dSEdgar E. Iglesias     } while (addr != end_addr);
27299d42037bSbellard 
27309d42037bSbellard     /* since each CPU stores ram addresses in its TLB cache, we must
27319d42037bSbellard        reset the modified entries */
27329d42037bSbellard     /* XXX: slow ! */
27339d42037bSbellard     for(env = first_cpu; env != NULL; env = env->next_cpu) {
27349d42037bSbellard         tlb_flush(env, 1);
27359d42037bSbellard     }
273633417e70Sbellard }
273733417e70Sbellard 
2738ba863458Sbellard /* XXX: temporary until new memory mapping API */
2739c227f099SAnthony Liguori ram_addr_t cpu_get_physical_page_desc(target_phys_addr_t addr)
2740ba863458Sbellard {
2741ba863458Sbellard     PhysPageDesc *p;
2742ba863458Sbellard 
2743ba863458Sbellard     p = phys_page_find(addr >> TARGET_PAGE_BITS);
2744ba863458Sbellard     if (!p)
2745ba863458Sbellard         return IO_MEM_UNASSIGNED;
2746ba863458Sbellard     return p->phys_offset;
2747ba863458Sbellard }
2748ba863458Sbellard 
2749c227f099SAnthony Liguori void qemu_register_coalesced_mmio(target_phys_addr_t addr, ram_addr_t size)
2750f65ed4c1Saliguori {
2751f65ed4c1Saliguori     if (kvm_enabled())
2752f65ed4c1Saliguori         kvm_coalesce_mmio_region(addr, size);
2753f65ed4c1Saliguori }
2754f65ed4c1Saliguori 
2755c227f099SAnthony Liguori void qemu_unregister_coalesced_mmio(target_phys_addr_t addr, ram_addr_t size)
2756f65ed4c1Saliguori {
2757f65ed4c1Saliguori     if (kvm_enabled())
2758f65ed4c1Saliguori         kvm_uncoalesce_mmio_region(addr, size);
2759f65ed4c1Saliguori }
2760f65ed4c1Saliguori 
276162a2744cSSheng Yang void qemu_flush_coalesced_mmio_buffer(void)
276262a2744cSSheng Yang {
276362a2744cSSheng Yang     if (kvm_enabled())
276462a2744cSSheng Yang         kvm_flush_coalesced_mmio_buffer();
276562a2744cSSheng Yang }
276662a2744cSSheng Yang 
2767c902760fSMarcelo Tosatti #if defined(__linux__) && !defined(TARGET_S390X)
2768c902760fSMarcelo Tosatti 
2769c902760fSMarcelo Tosatti #include <sys/vfs.h>
2770c902760fSMarcelo Tosatti 
2771c902760fSMarcelo Tosatti #define HUGETLBFS_MAGIC       0x958458f6
2772c902760fSMarcelo Tosatti 
2773c902760fSMarcelo Tosatti static long gethugepagesize(const char *path)
2774c902760fSMarcelo Tosatti {
2775c902760fSMarcelo Tosatti     struct statfs fs;
2776c902760fSMarcelo Tosatti     int ret;
2777c902760fSMarcelo Tosatti 
2778c902760fSMarcelo Tosatti     do {
2779c902760fSMarcelo Tosatti         ret = statfs(path, &fs);
2780c902760fSMarcelo Tosatti     } while (ret != 0 && errno == EINTR);
2781c902760fSMarcelo Tosatti 
2782c902760fSMarcelo Tosatti     if (ret != 0) {
27836adc0549SMichael Tokarev         perror(path);
2784c902760fSMarcelo Tosatti         return 0;
2785c902760fSMarcelo Tosatti     }
2786c902760fSMarcelo Tosatti 
2787c902760fSMarcelo Tosatti     if (fs.f_type != HUGETLBFS_MAGIC)
2788c902760fSMarcelo Tosatti         fprintf(stderr, "Warning: path not on HugeTLBFS: %s\n", path);
2789c902760fSMarcelo Tosatti 
2790c902760fSMarcelo Tosatti     return fs.f_bsize;
2791c902760fSMarcelo Tosatti }
2792c902760fSMarcelo Tosatti 
279304b16653SAlex Williamson static void *file_ram_alloc(RAMBlock *block,
279404b16653SAlex Williamson                             ram_addr_t memory,
279504b16653SAlex Williamson                             const char *path)
2796c902760fSMarcelo Tosatti {
2797c902760fSMarcelo Tosatti     char *filename;
2798c902760fSMarcelo Tosatti     void *area;
2799c902760fSMarcelo Tosatti     int fd;
2800c902760fSMarcelo Tosatti #ifdef MAP_POPULATE
2801c902760fSMarcelo Tosatti     int flags;
2802c902760fSMarcelo Tosatti #endif
2803c902760fSMarcelo Tosatti     unsigned long hpagesize;
2804c902760fSMarcelo Tosatti 
2805c902760fSMarcelo Tosatti     hpagesize = gethugepagesize(path);
2806c902760fSMarcelo Tosatti     if (!hpagesize) {
2807c902760fSMarcelo Tosatti         return NULL;
2808c902760fSMarcelo Tosatti     }
2809c902760fSMarcelo Tosatti 
2810c902760fSMarcelo Tosatti     if (memory < hpagesize) {
2811c902760fSMarcelo Tosatti         return NULL;
2812c902760fSMarcelo Tosatti     }
2813c902760fSMarcelo Tosatti 
2814c902760fSMarcelo Tosatti     if (kvm_enabled() && !kvm_has_sync_mmu()) {
2815c902760fSMarcelo Tosatti         fprintf(stderr, "host lacks kvm mmu notifiers, -mem-path unsupported\n");
2816c902760fSMarcelo Tosatti         return NULL;
2817c902760fSMarcelo Tosatti     }
2818c902760fSMarcelo Tosatti 
2819c902760fSMarcelo Tosatti     if (asprintf(&filename, "%s/qemu_back_mem.XXXXXX", path) == -1) {
2820c902760fSMarcelo Tosatti         return NULL;
2821c902760fSMarcelo Tosatti     }
2822c902760fSMarcelo Tosatti 
2823c902760fSMarcelo Tosatti     fd = mkstemp(filename);
2824c902760fSMarcelo Tosatti     if (fd < 0) {
28256adc0549SMichael Tokarev         perror("unable to create backing store for hugepages");
2826c902760fSMarcelo Tosatti         free(filename);
2827c902760fSMarcelo Tosatti         return NULL;
2828c902760fSMarcelo Tosatti     }
2829c902760fSMarcelo Tosatti     unlink(filename);
2830c902760fSMarcelo Tosatti     free(filename);
2831c902760fSMarcelo Tosatti 
2832c902760fSMarcelo Tosatti     memory = (memory+hpagesize-1) & ~(hpagesize-1);
2833c902760fSMarcelo Tosatti 
2834c902760fSMarcelo Tosatti     /*
2835c902760fSMarcelo Tosatti      * ftruncate is not supported by hugetlbfs in older
2836c902760fSMarcelo Tosatti      * hosts, so don't bother bailing out on errors.
2837c902760fSMarcelo Tosatti      * If anything goes wrong with it under other filesystems,
2838c902760fSMarcelo Tosatti      * mmap will fail.
2839c902760fSMarcelo Tosatti      */
2840c902760fSMarcelo Tosatti     if (ftruncate(fd, memory))
2841c902760fSMarcelo Tosatti         perror("ftruncate");
2842c902760fSMarcelo Tosatti 
2843c902760fSMarcelo Tosatti #ifdef MAP_POPULATE
2844c902760fSMarcelo Tosatti     /* NB: MAP_POPULATE won't exhaustively alloc all phys pages in the case
2845c902760fSMarcelo Tosatti      * MAP_PRIVATE is requested.  For mem_prealloc we mmap as MAP_SHARED
2846c902760fSMarcelo Tosatti      * to sidestep this quirk.
2847c902760fSMarcelo Tosatti      */
2848c902760fSMarcelo Tosatti     flags = mem_prealloc ? MAP_POPULATE | MAP_SHARED : MAP_PRIVATE;
2849c902760fSMarcelo Tosatti     area = mmap(0, memory, PROT_READ | PROT_WRITE, flags, fd, 0);
2850c902760fSMarcelo Tosatti #else
2851c902760fSMarcelo Tosatti     area = mmap(0, memory, PROT_READ | PROT_WRITE, MAP_PRIVATE, fd, 0);
2852c902760fSMarcelo Tosatti #endif
2853c902760fSMarcelo Tosatti     if (area == MAP_FAILED) {
2854c902760fSMarcelo Tosatti         perror("file_ram_alloc: can't mmap RAM pages");
2855c902760fSMarcelo Tosatti         close(fd);
2856c902760fSMarcelo Tosatti         return (NULL);
2857c902760fSMarcelo Tosatti     }
285804b16653SAlex Williamson     block->fd = fd;
2859c902760fSMarcelo Tosatti     return area;
2860c902760fSMarcelo Tosatti }
2861c902760fSMarcelo Tosatti #endif
2862c902760fSMarcelo Tosatti 
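/* Editorial note on the size rounding in file_ram_alloc() above:
 * requests are rounded up to a whole number of huge pages before the
 * mmap.  Standalone sketch with an assumed 2 MiB huge page size: */
#include <assert.h>
#include <stdint.h>

static uint64_t demo_round_to_hugepage(uint64_t memory, uint64_t hpagesize)
{
    /* valid because hpagesize is a power of two */
    return (memory + hpagesize - 1) & ~(hpagesize - 1);
}

static void demo_round(void)
{
    assert(demo_round_to_hugepage(1, 2u << 20) == 2u << 20);
    assert(demo_round_to_hugepage(3u << 20, 2u << 20) == 4u << 20);
}
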
2863d17b5288SAlex Williamson static ram_addr_t find_ram_offset(ram_addr_t size)
2864d17b5288SAlex Williamson {
286504b16653SAlex Williamson     RAMBlock *block, *next_block;
2866f15fbc4bSAnthony PERARD     ram_addr_t offset = 0, mingap = RAM_ADDR_MAX;
286704b16653SAlex Williamson 
286804b16653SAlex Williamson     if (QLIST_EMPTY(&ram_list.blocks))
286904b16653SAlex Williamson         return 0;
287004b16653SAlex Williamson 
287104b16653SAlex Williamson     QLIST_FOREACH(block, &ram_list.blocks, next) {
2872f15fbc4bSAnthony PERARD         ram_addr_t end, next = RAM_ADDR_MAX;
287304b16653SAlex Williamson 
287404b16653SAlex Williamson         end = block->offset + block->length;
287504b16653SAlex Williamson 
287604b16653SAlex Williamson         QLIST_FOREACH(next_block, &ram_list.blocks, next) {
287704b16653SAlex Williamson             if (next_block->offset >= end) {
287804b16653SAlex Williamson                 next = MIN(next, next_block->offset);
287904b16653SAlex Williamson             }
288004b16653SAlex Williamson         }
288104b16653SAlex Williamson         if (next - end >= size && next - end < mingap) {
288204b16653SAlex Williamson             offset =  end;
288304b16653SAlex Williamson             mingap = next - end;
288404b16653SAlex Williamson         }
288504b16653SAlex Williamson     }
288604b16653SAlex Williamson     return offset;
288704b16653SAlex Williamson }
288804b16653SAlex Williamson 
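/* Editorial sketch (standalone, compile separately) of the placement
 * policy in find_ram_offset() above: among all gaps between existing
 * blocks that can hold the new block, pick the smallest (best fit).
 * Toy fixed arrays stand in for the real RAMBlock list. */
#include <assert.h>
#include <stdint.h>

struct demo_blk { uint64_t off, len; };

static uint64_t demo_best_fit(const struct demo_blk *b, int n, uint64_t size)
{
    uint64_t offset = 0, mingap = UINT64_MAX;
    for (int i = 0; i < n; i++) {
        uint64_t end = b[i].off + b[i].len, next = UINT64_MAX;
        for (int j = 0; j < n; j++) {
            if (b[j].off >= end && b[j].off < next) {
                next = b[j].off;    /* closest block above this one */
            }
        }
        if (next - end >= size && next - end < mingap) {
            offset = end;
            mingap = next - end;
        }
    }
    return offset;
}

static void demo_fit(void)
{
    /* blocks at [0, 0x1000) and [0x3000, 0x4000): the 0x2000-byte gap
       at 0x1000 beats the unbounded space after the last block */
    struct demo_blk b[] = { { 0, 0x1000 }, { 0x3000, 0x1000 } };
    assert(demo_best_fit(b, 2, 0x1000) == 0x1000);
}
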
288904b16653SAlex Williamson static ram_addr_t last_ram_offset(void)
289004b16653SAlex Williamson {
2891d17b5288SAlex Williamson     RAMBlock *block;
2892d17b5288SAlex Williamson     ram_addr_t last = 0;
2893d17b5288SAlex Williamson 
2894d17b5288SAlex Williamson     QLIST_FOREACH(block, &ram_list.blocks, next)
2895d17b5288SAlex Williamson         last = MAX(last, block->offset + block->length);
2896d17b5288SAlex Williamson 
2897d17b5288SAlex Williamson     return last;
2898d17b5288SAlex Williamson }
2899d17b5288SAlex Williamson 
290084b89d78SCam Macdonell ram_addr_t qemu_ram_alloc_from_ptr(DeviceState *dev, const char *name,
290184b89d78SCam Macdonell                                    ram_addr_t size, void *host)
290284b89d78SCam Macdonell {
290384b89d78SCam Macdonell     RAMBlock *new_block, *block;
290484b89d78SCam Macdonell 
290584b89d78SCam Macdonell     size = TARGET_PAGE_ALIGN(size);
290684b89d78SCam Macdonell     new_block = qemu_mallocz(sizeof(*new_block));
290784b89d78SCam Macdonell 
290884b89d78SCam Macdonell     if (dev && dev->parent_bus && dev->parent_bus->info->get_dev_path) {
290984b89d78SCam Macdonell         char *id = dev->parent_bus->info->get_dev_path(dev);
291084b89d78SCam Macdonell         if (id) {
291184b89d78SCam Macdonell             snprintf(new_block->idstr, sizeof(new_block->idstr), "%s/", id);
291284b89d78SCam Macdonell             qemu_free(id);
291384b89d78SCam Macdonell         }
291484b89d78SCam Macdonell     }
291584b89d78SCam Macdonell     pstrcat(new_block->idstr, sizeof(new_block->idstr), name);
291684b89d78SCam Macdonell 
291784b89d78SCam Macdonell     QLIST_FOREACH(block, &ram_list.blocks, next) {
291884b89d78SCam Macdonell         if (!strcmp(block->idstr, new_block->idstr)) {
291984b89d78SCam Macdonell             fprintf(stderr, "RAMBlock \"%s\" already registered, abort!\n",
292084b89d78SCam Macdonell                     new_block->idstr);
292184b89d78SCam Macdonell             abort();
292284b89d78SCam Macdonell         }
292384b89d78SCam Macdonell     }
292484b89d78SCam Macdonell 
2925432d268cSJun Nakajima     new_block->offset = find_ram_offset(size);
29266977dfe6SYoshiaki Tamura     if (host) {
292784b89d78SCam Macdonell         new_block->host = host;
2928cd19cfa2SHuang Ying         new_block->flags |= RAM_PREALLOC_MASK;
29296977dfe6SYoshiaki Tamura     } else {
2930c902760fSMarcelo Tosatti         if (mem_path) {
2931c902760fSMarcelo Tosatti #if defined (__linux__) && !defined(TARGET_S390X)
293204b16653SAlex Williamson             new_block->host = file_ram_alloc(new_block, size, mem_path);
2933618a568dSMarcelo Tosatti             if (!new_block->host) {
2934618a568dSMarcelo Tosatti                 new_block->host = qemu_vmalloc(size);
2935e78815a5SAndreas Färber                 qemu_madvise(new_block->host, size, QEMU_MADV_MERGEABLE);
2936618a568dSMarcelo Tosatti             }
2937c902760fSMarcelo Tosatti #else
2938c902760fSMarcelo Tosatti             fprintf(stderr, "-mem-path option unsupported\n");
2939c902760fSMarcelo Tosatti             exit(1);
2940c902760fSMarcelo Tosatti #endif
2941c902760fSMarcelo Tosatti         } else {
29426b02494dSAlexander Graf #if defined(TARGET_S390X) && defined(CONFIG_KVM)
2943ff83678aSChristian Borntraeger             /* S390 KVM requires the topmost vma of the RAM to be smaller than
2944ff83678aSChristian Borntraeger                a system-defined value, which is at least 256GB. Larger systems
2945ff83678aSChristian Borntraeger                have larger values. We put the guest between the end of the data
2946ff83678aSChristian Borntraeger                segment (system break) and this value. We use 32GB as a base to
2947ff83678aSChristian Borntraeger                have enough room for the system break to grow. */
2948ff83678aSChristian Borntraeger             new_block->host = mmap((void*)0x800000000, size,
2949c902760fSMarcelo Tosatti                                    PROT_EXEC|PROT_READ|PROT_WRITE,
2950ff83678aSChristian Borntraeger                                    MAP_SHARED | MAP_ANONYMOUS | MAP_FIXED, -1, 0);
2951fb8b2735SAlexander Graf             if (new_block->host == MAP_FAILED) {
2952fb8b2735SAlexander Graf                 fprintf(stderr, "Allocating RAM failed\n");
2953fb8b2735SAlexander Graf                 abort();
2954fb8b2735SAlexander Graf             }
29556b02494dSAlexander Graf #else
2956868bb33fSJan Kiszka             if (xen_enabled()) {
2957432d268cSJun Nakajima                 xen_ram_alloc(new_block->offset, size);
2958432d268cSJun Nakajima             } else {
295994a6b54fSpbrook                 new_block->host = qemu_vmalloc(size);
2960432d268cSJun Nakajima             }
29616b02494dSAlexander Graf #endif
2962e78815a5SAndreas Färber             qemu_madvise(new_block->host, size, QEMU_MADV_MERGEABLE);
2963c902760fSMarcelo Tosatti         }
29646977dfe6SYoshiaki Tamura     }
296594a6b54fSpbrook     new_block->length = size;
296694a6b54fSpbrook 
2967f471a17eSAlex Williamson     QLIST_INSERT_HEAD(&ram_list.blocks, new_block, next);
296894a6b54fSpbrook 
2969f471a17eSAlex Williamson     ram_list.phys_dirty = qemu_realloc(ram_list.phys_dirty,
297004b16653SAlex Williamson                                        last_ram_offset() >> TARGET_PAGE_BITS);
2971d17b5288SAlex Williamson     memset(ram_list.phys_dirty + (new_block->offset >> TARGET_PAGE_BITS),
297294a6b54fSpbrook            0xff, size >> TARGET_PAGE_BITS);
297394a6b54fSpbrook 
29746f0437e8SJan Kiszka     if (kvm_enabled())
29756f0437e8SJan Kiszka         kvm_setup_guest_memory(new_block->host, size);
29766f0437e8SJan Kiszka 
297794a6b54fSpbrook     return new_block->offset;
297894a6b54fSpbrook }
2979e9a1ab19Sbellard 
29806977dfe6SYoshiaki Tamura ram_addr_t qemu_ram_alloc(DeviceState *dev, const char *name, ram_addr_t size)
29816977dfe6SYoshiaki Tamura {
29826977dfe6SYoshiaki Tamura     return qemu_ram_alloc_from_ptr(dev, name, size, NULL);
29836977dfe6SYoshiaki Tamura }
29846977dfe6SYoshiaki Tamura 
29851f2e98b6SAlex Williamson void qemu_ram_free_from_ptr(ram_addr_t addr)
29861f2e98b6SAlex Williamson {
29871f2e98b6SAlex Williamson     RAMBlock *block;
29881f2e98b6SAlex Williamson 
29891f2e98b6SAlex Williamson     QLIST_FOREACH(block, &ram_list.blocks, next) {
29901f2e98b6SAlex Williamson         if (addr == block->offset) {
29911f2e98b6SAlex Williamson             QLIST_REMOVE(block, next);
29921f2e98b6SAlex Williamson             qemu_free(block);
29931f2e98b6SAlex Williamson             return;
29941f2e98b6SAlex Williamson         }
29951f2e98b6SAlex Williamson     }
29961f2e98b6SAlex Williamson }
29971f2e98b6SAlex Williamson 
2998c227f099SAnthony Liguori void qemu_ram_free(ram_addr_t addr)
2999e9a1ab19Sbellard {
300004b16653SAlex Williamson     RAMBlock *block;
300104b16653SAlex Williamson 
300204b16653SAlex Williamson     QLIST_FOREACH(block, &ram_list.blocks, next) {
300304b16653SAlex Williamson         if (addr == block->offset) {
300404b16653SAlex Williamson             QLIST_REMOVE(block, next);
3005cd19cfa2SHuang Ying             if (block->flags & RAM_PREALLOC_MASK) {
3006cd19cfa2SHuang Ying                 ;
3007cd19cfa2SHuang Ying             } else if (mem_path) {
300804b16653SAlex Williamson #if defined (__linux__) && !defined(TARGET_S390X)
300904b16653SAlex Williamson                 if (block->fd) {
301004b16653SAlex Williamson                     munmap(block->host, block->length);
301104b16653SAlex Williamson                     close(block->fd);
301204b16653SAlex Williamson                 } else {
301304b16653SAlex Williamson                     qemu_vfree(block->host);
301404b16653SAlex Williamson                 }
3015fd28aa13SJan Kiszka #else
3016fd28aa13SJan Kiszka                 abort();
301704b16653SAlex Williamson #endif
301804b16653SAlex Williamson             } else {
301904b16653SAlex Williamson #if defined(TARGET_S390X) && defined(CONFIG_KVM)
302004b16653SAlex Williamson                 munmap(block->host, block->length);
302104b16653SAlex Williamson #else
3022868bb33fSJan Kiszka                 if (xen_enabled()) {
3023e41d7c69SJan Kiszka                     xen_invalidate_map_cache_entry(block->host);
3024432d268cSJun Nakajima                 } else {
302504b16653SAlex Williamson                     qemu_vfree(block->host);
3026432d268cSJun Nakajima                 }
302704b16653SAlex Williamson #endif
302804b16653SAlex Williamson             }
302904b16653SAlex Williamson             qemu_free(block);
303004b16653SAlex Williamson             return;
303104b16653SAlex Williamson         }
303204b16653SAlex Williamson     }
303304b16653SAlex Williamson 
3034e9a1ab19Sbellard }
3035e9a1ab19Sbellard 
3036cd19cfa2SHuang Ying #ifndef _WIN32
3037cd19cfa2SHuang Ying void qemu_ram_remap(ram_addr_t addr, ram_addr_t length)
3038cd19cfa2SHuang Ying {
3039cd19cfa2SHuang Ying     RAMBlock *block;
3040cd19cfa2SHuang Ying     ram_addr_t offset;
3041cd19cfa2SHuang Ying     int flags;
3042cd19cfa2SHuang Ying     void *area, *vaddr;
3043cd19cfa2SHuang Ying 
3044cd19cfa2SHuang Ying     QLIST_FOREACH(block, &ram_list.blocks, next) {
3045cd19cfa2SHuang Ying         offset = addr - block->offset;
3046cd19cfa2SHuang Ying         if (offset < block->length) {
3047cd19cfa2SHuang Ying             vaddr = block->host + offset;
3048cd19cfa2SHuang Ying             if (block->flags & RAM_PREALLOC_MASK) {
3049cd19cfa2SHuang Ying                 ;
3050cd19cfa2SHuang Ying             } else {
3051cd19cfa2SHuang Ying                 flags = MAP_FIXED;
3052cd19cfa2SHuang Ying                 munmap(vaddr, length);
3053cd19cfa2SHuang Ying                 if (mem_path) {
3054cd19cfa2SHuang Ying #if defined(__linux__) && !defined(TARGET_S390X)
3055cd19cfa2SHuang Ying                     if (block->fd) {
3056cd19cfa2SHuang Ying #ifdef MAP_POPULATE
3057cd19cfa2SHuang Ying                         flags |= mem_prealloc ? MAP_POPULATE | MAP_SHARED :
3058cd19cfa2SHuang Ying                             MAP_PRIVATE;
3059cd19cfa2SHuang Ying #else
3060cd19cfa2SHuang Ying                         flags |= MAP_PRIVATE;
3061cd19cfa2SHuang Ying #endif
3062cd19cfa2SHuang Ying                         area = mmap(vaddr, length, PROT_READ | PROT_WRITE,
3063cd19cfa2SHuang Ying                                     flags, block->fd, offset);
3064cd19cfa2SHuang Ying                     } else {
3065cd19cfa2SHuang Ying                         flags |= MAP_PRIVATE | MAP_ANONYMOUS;
3066cd19cfa2SHuang Ying                         area = mmap(vaddr, length, PROT_READ | PROT_WRITE,
3067cd19cfa2SHuang Ying                                     flags, -1, 0);
3068cd19cfa2SHuang Ying                     }
3069fd28aa13SJan Kiszka #else
3070fd28aa13SJan Kiszka                     abort();
3071cd19cfa2SHuang Ying #endif
3072cd19cfa2SHuang Ying                 } else {
3073cd19cfa2SHuang Ying #if defined(TARGET_S390X) && defined(CONFIG_KVM)
3074cd19cfa2SHuang Ying                     flags |= MAP_SHARED | MAP_ANONYMOUS;
3075cd19cfa2SHuang Ying                     area = mmap(vaddr, length, PROT_EXEC|PROT_READ|PROT_WRITE,
3076cd19cfa2SHuang Ying                                 flags, -1, 0);
3077cd19cfa2SHuang Ying #else
3078cd19cfa2SHuang Ying                     flags |= MAP_PRIVATE | MAP_ANONYMOUS;
3079cd19cfa2SHuang Ying                     area = mmap(vaddr, length, PROT_READ | PROT_WRITE,
3080cd19cfa2SHuang Ying                                 flags, -1, 0);
3081cd19cfa2SHuang Ying #endif
3082cd19cfa2SHuang Ying                 }
3083cd19cfa2SHuang Ying                 if (area != vaddr) {
3084f15fbc4bSAnthony PERARD                     fprintf(stderr, "Could not remap addr: "
3085f15fbc4bSAnthony PERARD                             RAM_ADDR_FMT "@" RAM_ADDR_FMT "\n",
3086cd19cfa2SHuang Ying                             length, addr);
3087cd19cfa2SHuang Ying                     exit(1);
3088cd19cfa2SHuang Ying                 }
3089cd19cfa2SHuang Ying                 qemu_madvise(vaddr, length, QEMU_MADV_MERGEABLE);
3090cd19cfa2SHuang Ying             }
3091cd19cfa2SHuang Ying             return;
3092cd19cfa2SHuang Ying         }
3093cd19cfa2SHuang Ying     }
3094cd19cfa2SHuang Ying }
3095cd19cfa2SHuang Ying #endif /* !_WIN32 */
3096cd19cfa2SHuang Ying 
3097dc828ca1Spbrook /* Return a host pointer to ram allocated with qemu_ram_alloc.
30985579c7f3Spbrook    With the exception of the softmmu code in this file, this should
30995579c7f3Spbrook    only be used for local memory (e.g. video ram) that the device owns,
31005579c7f3Spbrook    and knows it isn't going to access beyond the end of the block.
31015579c7f3Spbrook 
31025579c7f3Spbrook    It should not be used for general purpose DMA.
31035579c7f3Spbrook    Use cpu_physical_memory_map/cpu_physical_memory_rw instead.
31045579c7f3Spbrook  */
3105c227f099SAnthony Liguori void *qemu_get_ram_ptr(ram_addr_t addr)
3106dc828ca1Spbrook {
310794a6b54fSpbrook     RAMBlock *block;
310894a6b54fSpbrook 
3109f471a17eSAlex Williamson     QLIST_FOREACH(block, &ram_list.blocks, next) {
3110f471a17eSAlex Williamson         if (addr - block->offset < block->length) {
31117d82af38SVincent Palatin             /* Move this entry to the start of the list.  */
31127d82af38SVincent Palatin             if (block != QLIST_FIRST(&ram_list.blocks)) {
3113f471a17eSAlex Williamson                 QLIST_REMOVE(block, next);
3114f471a17eSAlex Williamson                 QLIST_INSERT_HEAD(&ram_list.blocks, block, next);
31157d82af38SVincent Palatin             }
3116868bb33fSJan Kiszka             if (xen_enabled()) {
3117432d268cSJun Nakajima                 /* We need to check whether the requested address is in RAM,
3118432d268cSJun Nakajima                  * because we don't want to map the entire guest memory in QEMU.
3119712c2b41SStefano Stabellini                  * In that case, just map up to the end of the page.
3120432d268cSJun Nakajima                  */
3121432d268cSJun Nakajima                 if (block->offset == 0) {
3122e41d7c69SJan Kiszka                     return xen_map_cache(addr, 0, 0);
3123432d268cSJun Nakajima                 } else if (block->host == NULL) {
3124e41d7c69SJan Kiszka                     block->host =
3125e41d7c69SJan Kiszka                         xen_map_cache(block->offset, block->length, 1);
3126432d268cSJun Nakajima                 }
3127432d268cSJun Nakajima             }
3128f471a17eSAlex Williamson             return block->host + (addr - block->offset);
312994a6b54fSpbrook         }
3130f471a17eSAlex Williamson     }
3131f471a17eSAlex Williamson 
313294a6b54fSpbrook     fprintf(stderr, "Bad ram offset %" PRIx64 "\n", (uint64_t)addr);
313394a6b54fSpbrook     abort();
3134f471a17eSAlex Williamson 
3135f471a17eSAlex Williamson     return NULL;
3136dc828ca1Spbrook }
3137dc828ca1Spbrook 
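/* Editorial sketch (standalone, compile separately) of the
 * move-to-front heuristic qemu_get_ram_ptr() uses above: a lookup
 * promotes the hit block to the list head, so repeated accesses to the
 * same block stay cheap.  Toy singly linked list: */
#include <assert.h>
#include <stddef.h>

struct demo_node { int key; struct demo_node *next; };

static struct demo_node *demo_find_mtf(struct demo_node **head, int key)
{
    struct demo_node **pp = head;
    for (struct demo_node *n = *head; n != NULL; pp = &n->next, n = n->next) {
        if (n->key == key) {
            *pp = n->next;    /* unlink the hit ... */
            n->next = *head;  /* ... and push it to the front */
            *head = n;
            return n;
        }
    }
    return NULL;
}

static void demo_mtf(void)
{
    struct demo_node c = { 3, NULL }, b = { 2, &c }, a = { 1, &b };
    struct demo_node *head = &a;
    assert(demo_find_mtf(&head, 3) == &c);
    assert(head == &c);       /* the hit now leads, as in the loop above */
}
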
3138b2e0a138SMichael S. Tsirkin /* Return a host pointer to ram allocated with qemu_ram_alloc.
3139b2e0a138SMichael S. Tsirkin  * Same as qemu_get_ram_ptr but avoid reordering ramblocks.
3140b2e0a138SMichael S. Tsirkin  */
3141b2e0a138SMichael S. Tsirkin void *qemu_safe_ram_ptr(ram_addr_t addr)
3142b2e0a138SMichael S. Tsirkin {
3143b2e0a138SMichael S. Tsirkin     RAMBlock *block;
3144b2e0a138SMichael S. Tsirkin 
3145b2e0a138SMichael S. Tsirkin     QLIST_FOREACH(block, &ram_list.blocks, next) {
3146b2e0a138SMichael S. Tsirkin         if (addr - block->offset < block->length) {
3147868bb33fSJan Kiszka             if (xen_enabled()) {
3148432d268cSJun Nakajima                 /* We need to check whether the requested address is in RAM,
3149432d268cSJun Nakajima                  * because we don't want to map the entire guest memory in QEMU.
3150712c2b41SStefano Stabellini                  * In that case, just map up to the end of the page.
3151432d268cSJun Nakajima                  */
3152432d268cSJun Nakajima                 if (block->offset == 0) {
3153e41d7c69SJan Kiszka                     return xen_map_cache(addr, 0, 0);
3154432d268cSJun Nakajima                 } else if (block->host == NULL) {
3155e41d7c69SJan Kiszka                     block->host =
3156e41d7c69SJan Kiszka                         xen_map_cache(block->offset, block->length, 1);
3157432d268cSJun Nakajima                 }
3158432d268cSJun Nakajima             }
3159b2e0a138SMichael S. Tsirkin             return block->host + (addr - block->offset);
3160b2e0a138SMichael S. Tsirkin         }
3161b2e0a138SMichael S. Tsirkin     }
3162b2e0a138SMichael S. Tsirkin 
3163b2e0a138SMichael S. Tsirkin     fprintf(stderr, "Bad ram offset %" PRIx64 "\n", (uint64_t)addr);
3164b2e0a138SMichael S. Tsirkin     abort();
3165b2e0a138SMichael S. Tsirkin 
3166b2e0a138SMichael S. Tsirkin     return NULL;
3167b2e0a138SMichael S. Tsirkin }
3168b2e0a138SMichael S. Tsirkin 
316938bee5dcSStefano Stabellini /* Return a host pointer to the guest's RAM. Similar to qemu_get_ram_ptr
317038bee5dcSStefano Stabellini  * but takes a size argument */
31718ab934f9SStefano Stabellini void *qemu_ram_ptr_length(ram_addr_t addr, ram_addr_t *size)
317238bee5dcSStefano Stabellini {
31738ab934f9SStefano Stabellini     if (*size == 0) {
31748ab934f9SStefano Stabellini         return NULL;
31758ab934f9SStefano Stabellini     }
3176868bb33fSJan Kiszka     if (xen_enabled()) {
3177e41d7c69SJan Kiszka         return xen_map_cache(addr, *size, 1);
3178868bb33fSJan Kiszka     } else {
317938bee5dcSStefano Stabellini         RAMBlock *block;
318038bee5dcSStefano Stabellini 
318138bee5dcSStefano Stabellini         QLIST_FOREACH(block, &ram_list.blocks, next) {
318238bee5dcSStefano Stabellini             if (addr - block->offset < block->length) {
318338bee5dcSStefano Stabellini                 if (addr - block->offset + *size > block->length)
318438bee5dcSStefano Stabellini                     *size = block->length - addr + block->offset;
318538bee5dcSStefano Stabellini                 return block->host + (addr - block->offset);
318638bee5dcSStefano Stabellini             }
318738bee5dcSStefano Stabellini         }
318838bee5dcSStefano Stabellini 
318938bee5dcSStefano Stabellini         fprintf(stderr, "Bad ram offset %" PRIx64 "\n", (uint64_t)addr);
319038bee5dcSStefano Stabellini         abort();
319138bee5dcSStefano Stabellini     }
319238bee5dcSStefano Stabellini }
319338bee5dcSStefano Stabellini 
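/* Illustrative sketch (not from the original source): qemu_ram_ptr_length()
 * may shrink *size to what is contiguous within one RAM block, so callers
 * have to loop on the returned length.  copy_to_guest_ram() and its
 * arguments are hypothetical names.
 */
static void copy_to_guest_ram(ram_addr_t gpa, const uint8_t *src, ram_addr_t n)
{
    while (n > 0) {
        ram_addr_t chunk = n;
        void *host = qemu_ram_ptr_length(gpa, &chunk); /* chunk may shrink */
        memcpy(host, src, chunk);
        gpa += chunk;
        src += chunk;
        n -= chunk;
    }
}
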
3194050a0ddfSAnthony PERARD void qemu_put_ram_ptr(void *addr)
3195050a0ddfSAnthony PERARD {
3196050a0ddfSAnthony PERARD     trace_qemu_put_ram_ptr(addr);
3197050a0ddfSAnthony PERARD }
3198050a0ddfSAnthony PERARD 
3199e890261fSMarcelo Tosatti int qemu_ram_addr_from_host(void *ptr, ram_addr_t *ram_addr)
32005579c7f3Spbrook {
320194a6b54fSpbrook     RAMBlock *block;
320294a6b54fSpbrook     uint8_t *host = ptr;
320394a6b54fSpbrook 
3204868bb33fSJan Kiszka     if (xen_enabled()) {
3205e41d7c69SJan Kiszka         *ram_addr = xen_ram_addr_from_mapcache(ptr);
3206712c2b41SStefano Stabellini         return 0;
3207712c2b41SStefano Stabellini     }
3208712c2b41SStefano Stabellini 
3209f471a17eSAlex Williamson     QLIST_FOREACH(block, &ram_list.blocks, next) {
3210432d268cSJun Nakajima         /* This case happens when the block is not mapped. */
3211432d268cSJun Nakajima         if (block->host == NULL) {
3212432d268cSJun Nakajima             continue;
3213432d268cSJun Nakajima         }
3214f471a17eSAlex Williamson         if (host - block->host < block->length) {
3215e890261fSMarcelo Tosatti             *ram_addr = block->offset + (host - block->host);
3216e890261fSMarcelo Tosatti             return 0;
321794a6b54fSpbrook         }
3218f471a17eSAlex Williamson     }
3219432d268cSJun Nakajima 
3220e890261fSMarcelo Tosatti     return -1;
3221e890261fSMarcelo Tosatti }
3222f471a17eSAlex Williamson 
3223e890261fSMarcelo Tosatti /* Some of the softmmu routines need to translate from a host pointer
3224e890261fSMarcelo Tosatti    (typically a TLB entry) back to a ram offset.  */
3225e890261fSMarcelo Tosatti ram_addr_t qemu_ram_addr_from_host_nofail(void *ptr)
3226e890261fSMarcelo Tosatti {
3227e890261fSMarcelo Tosatti     ram_addr_t ram_addr;
3228e890261fSMarcelo Tosatti 
3229e890261fSMarcelo Tosatti     if (qemu_ram_addr_from_host(ptr, &ram_addr)) {
323094a6b54fSpbrook         fprintf(stderr, "Bad ram pointer %p\n", ptr);
323194a6b54fSpbrook         abort();
3232e890261fSMarcelo Tosatti     }
3233e890261fSMarcelo Tosatti     return ram_addr;
32345579c7f3Spbrook }
32355579c7f3Spbrook 
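/* Illustrative sketch (not from the original source): round-tripping a host
 * pointer obtained from qemu_get_ram_ptr() back to its ram_addr_t.  The
 * checked variant is the one to use when the pointer might not point into
 * guest RAM; the _nofail variant above aborts in that case.
 */
static int host_ptr_round_trip(ram_addr_t addr)
{
    void *host = qemu_get_ram_ptr(addr);
    ram_addr_t back;

    if (qemu_ram_addr_from_host(host, &back) < 0) {
        return -1;                /* not a guest RAM pointer */
    }
    return back == addr ? 0 : -1;
}
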
3236c227f099SAnthony Liguori static uint32_t unassigned_mem_readb(void *opaque, target_phys_addr_t addr)
323733417e70Sbellard {
323867d3b957Spbrook #ifdef DEBUG_UNASSIGNED
3239ab3d1727Sblueswir1     printf("Unassigned mem read " TARGET_FMT_plx "\n", addr);
324067d3b957Spbrook #endif
32415b450407SRichard Henderson #if defined(TARGET_ALPHA) || defined(TARGET_SPARC) || defined(TARGET_MICROBLAZE)
3242b14ef7c9SBlue Swirl     cpu_unassigned_access(cpu_single_env, addr, 0, 0, 0, 1);
3243e18231a3Sblueswir1 #endif
3244e18231a3Sblueswir1     return 0;
3245e18231a3Sblueswir1 }
3246e18231a3Sblueswir1 
3247c227f099SAnthony Liguori static uint32_t unassigned_mem_readw(void *opaque, target_phys_addr_t addr)
3248e18231a3Sblueswir1 {
3249e18231a3Sblueswir1 #ifdef DEBUG_UNASSIGNED
3250e18231a3Sblueswir1     printf("Unassigned mem read " TARGET_FMT_plx "\n", addr);
3251e18231a3Sblueswir1 #endif
32525b450407SRichard Henderson #if defined(TARGET_ALPHA) || defined(TARGET_SPARC) || defined(TARGET_MICROBLAZE)
3253b14ef7c9SBlue Swirl     cpu_unassigned_access(cpu_single_env, addr, 0, 0, 0, 2);
3254e18231a3Sblueswir1 #endif
3255e18231a3Sblueswir1     return 0;
3256e18231a3Sblueswir1 }
3257e18231a3Sblueswir1 
3258c227f099SAnthony Liguori static uint32_t unassigned_mem_readl(void *opaque, target_phys_addr_t addr)
3259e18231a3Sblueswir1 {
3260e18231a3Sblueswir1 #ifdef DEBUG_UNASSIGNED
3261e18231a3Sblueswir1     printf("Unassigned mem read " TARGET_FMT_plx "\n", addr);
3262e18231a3Sblueswir1 #endif
32635b450407SRichard Henderson #if defined(TARGET_ALPHA) || defined(TARGET_SPARC) || defined(TARGET_MICROBLAZE)
3264b14ef7c9SBlue Swirl     cpu_unassigned_access(cpu_single_env, addr, 0, 0, 0, 4);
3265b4f0a316Sblueswir1 #endif
326633417e70Sbellard     return 0;
326733417e70Sbellard }
326833417e70Sbellard 
3269c227f099SAnthony Liguori static void unassigned_mem_writeb(void *opaque, target_phys_addr_t addr, uint32_t val)
327033417e70Sbellard {
327167d3b957Spbrook #ifdef DEBUG_UNASSIGNED
3272ab3d1727Sblueswir1     printf("Unassigned mem write " TARGET_FMT_plx " = 0x%x\n", addr, val);
327367d3b957Spbrook #endif
32745b450407SRichard Henderson #if defined(TARGET_ALPHA) || defined(TARGET_SPARC) || defined(TARGET_MICROBLAZE)
3275b14ef7c9SBlue Swirl     cpu_unassigned_access(cpu_single_env, addr, 1, 0, 0, 1);
3276e18231a3Sblueswir1 #endif
3277e18231a3Sblueswir1 }
3278e18231a3Sblueswir1 
3279c227f099SAnthony Liguori static void unassigned_mem_writew(void *opaque, target_phys_addr_t addr, uint32_t val)
3280e18231a3Sblueswir1 {
3281e18231a3Sblueswir1 #ifdef DEBUG_UNASSIGNED
3282e18231a3Sblueswir1     printf("Unassigned mem write " TARGET_FMT_plx " = 0x%x\n", addr, val);
3283e18231a3Sblueswir1 #endif
32845b450407SRichard Henderson #if defined(TARGET_ALPHA) || defined(TARGET_SPARC) || defined(TARGET_MICROBLAZE)
3285b14ef7c9SBlue Swirl     cpu_unassigned_access(cpu_single_env, addr, 1, 0, 0, 2);
3286e18231a3Sblueswir1 #endif
3287e18231a3Sblueswir1 }
3288e18231a3Sblueswir1 
3289c227f099SAnthony Liguori static void unassigned_mem_writel(void *opaque, target_phys_addr_t addr, uint32_t val)
3290e18231a3Sblueswir1 {
3291e18231a3Sblueswir1 #ifdef DEBUG_UNASSIGNED
3292e18231a3Sblueswir1     printf("Unassigned mem write " TARGET_FMT_plx " = 0x%x\n", addr, val);
3293e18231a3Sblueswir1 #endif
32945b450407SRichard Henderson #if defined(TARGET_ALPHA) || defined(TARGET_SPARC) || defined(TARGET_MICROBLAZE)
3295b14ef7c9SBlue Swirl     cpu_unassigned_access(cpu_single_env, addr, 1, 0, 0, 4);
3296b4f0a316Sblueswir1 #endif
329733417e70Sbellard }
329833417e70Sbellard 
3299d60efc6bSBlue Swirl static CPUReadMemoryFunc * const unassigned_mem_read[3] = {
330033417e70Sbellard     unassigned_mem_readb,
3301e18231a3Sblueswir1     unassigned_mem_readw,
3302e18231a3Sblueswir1     unassigned_mem_readl,
330333417e70Sbellard };
330433417e70Sbellard 
3305d60efc6bSBlue Swirl static CPUWriteMemoryFunc * const unassigned_mem_write[3] = {
330633417e70Sbellard     unassigned_mem_writeb,
3307e18231a3Sblueswir1     unassigned_mem_writew,
3308e18231a3Sblueswir1     unassigned_mem_writel,
330933417e70Sbellard };
331033417e70Sbellard 
3311c227f099SAnthony Liguori static void notdirty_mem_writeb(void *opaque, target_phys_addr_t ram_addr,
33120f459d16Spbrook                                 uint32_t val)
33131ccde1cbSbellard {
33143a7d929eSbellard     int dirty_flags;
3315f7c11b53SYoshiaki Tamura     dirty_flags = cpu_physical_memory_get_dirty_flags(ram_addr);
33163a7d929eSbellard     if (!(dirty_flags & CODE_DIRTY_FLAG)) {
33173a7d929eSbellard #if !defined(CONFIG_USER_ONLY)
33183a7d929eSbellard         tb_invalidate_phys_page_fast(ram_addr, 1);
3319f7c11b53SYoshiaki Tamura         dirty_flags = cpu_physical_memory_get_dirty_flags(ram_addr);
33203a7d929eSbellard #endif
33213a7d929eSbellard     }
33225579c7f3Spbrook     stb_p(qemu_get_ram_ptr(ram_addr), val);
3323f23db169Sbellard     dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
3324f7c11b53SYoshiaki Tamura     cpu_physical_memory_set_dirty_flags(ram_addr, dirty_flags);
3325f23db169Sbellard     /* we remove the notdirty callback only if the code has been
3326f23db169Sbellard        flushed */
3327f23db169Sbellard     if (dirty_flags == 0xff)
33282e70f6efSpbrook         tlb_set_dirty(cpu_single_env, cpu_single_env->mem_io_vaddr);
33291ccde1cbSbellard }
33301ccde1cbSbellard 
3331c227f099SAnthony Liguori static void notdirty_mem_writew(void *opaque, target_phys_addr_t ram_addr,
33320f459d16Spbrook                                 uint32_t val)
33331ccde1cbSbellard {
33343a7d929eSbellard     int dirty_flags;
3335f7c11b53SYoshiaki Tamura     dirty_flags = cpu_physical_memory_get_dirty_flags(ram_addr);
33363a7d929eSbellard     if (!(dirty_flags & CODE_DIRTY_FLAG)) {
33373a7d929eSbellard #if !defined(CONFIG_USER_ONLY)
33383a7d929eSbellard         tb_invalidate_phys_page_fast(ram_addr, 2);
3339f7c11b53SYoshiaki Tamura         dirty_flags = cpu_physical_memory_get_dirty_flags(ram_addr);
33403a7d929eSbellard #endif
33413a7d929eSbellard     }
33425579c7f3Spbrook     stw_p(qemu_get_ram_ptr(ram_addr), val);
3343f23db169Sbellard     dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
3344f7c11b53SYoshiaki Tamura     cpu_physical_memory_set_dirty_flags(ram_addr, dirty_flags);
3345f23db169Sbellard     /* we remove the notdirty callback only if the code has been
3346f23db169Sbellard        flushed */
3347f23db169Sbellard     if (dirty_flags == 0xff)
33482e70f6efSpbrook         tlb_set_dirty(cpu_single_env, cpu_single_env->mem_io_vaddr);
33491ccde1cbSbellard }
33501ccde1cbSbellard 
3351c227f099SAnthony Liguori static void notdirty_mem_writel(void *opaque, target_phys_addr_t ram_addr,
33520f459d16Spbrook                                 uint32_t val)
33531ccde1cbSbellard {
33543a7d929eSbellard     int dirty_flags;
3355f7c11b53SYoshiaki Tamura     dirty_flags = cpu_physical_memory_get_dirty_flags(ram_addr);
33563a7d929eSbellard     if (!(dirty_flags & CODE_DIRTY_FLAG)) {
33573a7d929eSbellard #if !defined(CONFIG_USER_ONLY)
33583a7d929eSbellard         tb_invalidate_phys_page_fast(ram_addr, 4);
3359f7c11b53SYoshiaki Tamura         dirty_flags = cpu_physical_memory_get_dirty_flags(ram_addr);
33603a7d929eSbellard #endif
33613a7d929eSbellard     }
33625579c7f3Spbrook     stl_p(qemu_get_ram_ptr(ram_addr), val);
3363f23db169Sbellard     dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
3364f7c11b53SYoshiaki Tamura     cpu_physical_memory_set_dirty_flags(ram_addr, dirty_flags);
3365f23db169Sbellard     /* we remove the notdirty callback only if the code has been
3366f23db169Sbellard        flushed */
3367f23db169Sbellard     if (dirty_flags == 0xff)
33682e70f6efSpbrook         tlb_set_dirty(cpu_single_env, cpu_single_env->mem_io_vaddr);
33691ccde1cbSbellard }
33701ccde1cbSbellard 
3371d60efc6bSBlue Swirl static CPUReadMemoryFunc * const error_mem_read[3] = {
33723a7d929eSbellard     NULL, /* never used */
33733a7d929eSbellard     NULL, /* never used */
33743a7d929eSbellard     NULL, /* never used */
33753a7d929eSbellard };
33763a7d929eSbellard 
3377d60efc6bSBlue Swirl static CPUWriteMemoryFunc * const notdirty_mem_write[3] = {
33781ccde1cbSbellard     notdirty_mem_writeb,
33791ccde1cbSbellard     notdirty_mem_writew,
33801ccde1cbSbellard     notdirty_mem_writel,
33811ccde1cbSbellard };
33821ccde1cbSbellard 
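/* Illustrative sketch (not from the original source): the same dirty-flag
 * dance the notdirty handlers above perform, as used by code that writes
 * guest RAM directly (e.g. DMA).  write_guest_ram() is a hypothetical name;
 * the individual calls all appear elsewhere in this file.
 */
static void write_guest_ram(ram_addr_t addr1, const uint8_t *buf, int l)
{
    memcpy(qemu_get_ram_ptr(addr1), buf, l);
    if (!cpu_physical_memory_is_dirty(addr1)) {
        /* invalidate any translated code in the written range */
        tb_invalidate_phys_page_range(addr1, addr1 + l, 0);
        /* set every dirty bit except CODE_DIRTY_FLAG, which keeps the
           notdirty write handler armed until the code is really flushed */
        cpu_physical_memory_set_dirty_flags(addr1, 0xff & ~CODE_DIRTY_FLAG);
    }
}
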
33830f459d16Spbrook /* Generate a debug exception if a watchpoint has been hit.  */
3384b4051334Saliguori static void check_watchpoint(int offset, int len_mask, int flags)
33850f459d16Spbrook {
33860f459d16Spbrook     CPUState *env = cpu_single_env;
338706d55cc1Saliguori     target_ulong pc, cs_base;
338806d55cc1Saliguori     TranslationBlock *tb;
33890f459d16Spbrook     target_ulong vaddr;
3390a1d1bb31Saliguori     CPUWatchpoint *wp;
339106d55cc1Saliguori     int cpu_flags;
33920f459d16Spbrook 
339306d55cc1Saliguori     if (env->watchpoint_hit) {
339406d55cc1Saliguori         /* We re-entered the check after replacing the TB. Now raise
339506d55cc1Saliguori          * the debug interrupt so that it will trigger after the
339606d55cc1Saliguori          * current instruction. */
339706d55cc1Saliguori         cpu_interrupt(env, CPU_INTERRUPT_DEBUG);
339806d55cc1Saliguori         return;
339906d55cc1Saliguori     }
34002e70f6efSpbrook     vaddr = (env->mem_io_vaddr & TARGET_PAGE_MASK) + offset;
340172cf2d4fSBlue Swirl     QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
3402b4051334Saliguori         if ((vaddr == (wp->vaddr & len_mask) ||
3403b4051334Saliguori              (vaddr & wp->len_mask) == wp->vaddr) && (wp->flags & flags)) {
34046e140f28Saliguori             wp->flags |= BP_WATCHPOINT_HIT;
34056e140f28Saliguori             if (!env->watchpoint_hit) {
3406a1d1bb31Saliguori                 env->watchpoint_hit = wp;
340706d55cc1Saliguori                 tb = tb_find_pc(env->mem_io_pc);
340806d55cc1Saliguori                 if (!tb) {
34096e140f28Saliguori                     cpu_abort(env, "check_watchpoint: could not find TB for "
34106e140f28Saliguori                               "pc=%p", (void *)env->mem_io_pc);
341106d55cc1Saliguori                 }
3412618ba8e6SStefan Weil                 cpu_restore_state(tb, env, env->mem_io_pc);
341306d55cc1Saliguori                 tb_phys_invalidate(tb, -1);
341406d55cc1Saliguori                 if (wp->flags & BP_STOP_BEFORE_ACCESS) {
341506d55cc1Saliguori                     env->exception_index = EXCP_DEBUG;
341606d55cc1Saliguori                 } else {
341706d55cc1Saliguori                     cpu_get_tb_cpu_state(env, &pc, &cs_base, &cpu_flags);
341806d55cc1Saliguori                     tb_gen_code(env, pc, cs_base, cpu_flags, 1);
341906d55cc1Saliguori                 }
342006d55cc1Saliguori                 cpu_resume_from_signal(env, NULL);
34210f459d16Spbrook             }
34226e140f28Saliguori         } else {
34236e140f28Saliguori             wp->flags &= ~BP_WATCHPOINT_HIT;
34246e140f28Saliguori         }
34250f459d16Spbrook     }
34260f459d16Spbrook }
34270f459d16Spbrook 
34286658ffb8Spbrook /* Watchpoint access routines.  Watchpoints are inserted using TLB tricks,
34296658ffb8Spbrook    so these check for a hit then pass through to the normal out-of-line
34306658ffb8Spbrook    phys routines.  */
3431c227f099SAnthony Liguori static uint32_t watch_mem_readb(void *opaque, target_phys_addr_t addr)
34326658ffb8Spbrook {
3433b4051334Saliguori     check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x0, BP_MEM_READ);
34346658ffb8Spbrook     return ldub_phys(addr);
34356658ffb8Spbrook }
34366658ffb8Spbrook 
3437c227f099SAnthony Liguori static uint32_t watch_mem_readw(void *opaque, target_phys_addr_t addr)
34386658ffb8Spbrook {
3439b4051334Saliguori     check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x1, BP_MEM_READ);
34406658ffb8Spbrook     return lduw_phys(addr);
34416658ffb8Spbrook }
34426658ffb8Spbrook 
3443c227f099SAnthony Liguori static uint32_t watch_mem_readl(void *opaque, target_phys_addr_t addr)
34446658ffb8Spbrook {
3445b4051334Saliguori     check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x3, BP_MEM_READ);
34466658ffb8Spbrook     return ldl_phys(addr);
34476658ffb8Spbrook }
34486658ffb8Spbrook 
3449c227f099SAnthony Liguori static void watch_mem_writeb(void *opaque, target_phys_addr_t addr,
34506658ffb8Spbrook                              uint32_t val)
34516658ffb8Spbrook {
3452b4051334Saliguori     check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x0, BP_MEM_WRITE);
34536658ffb8Spbrook     stb_phys(addr, val);
34546658ffb8Spbrook }
34556658ffb8Spbrook 
3456c227f099SAnthony Liguori static void watch_mem_writew(void *opaque, target_phys_addr_t addr,
34576658ffb8Spbrook                              uint32_t val)
34586658ffb8Spbrook {
3459b4051334Saliguori     check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x1, BP_MEM_WRITE);
34606658ffb8Spbrook     stw_phys(addr, val);
34616658ffb8Spbrook }
34626658ffb8Spbrook 
3463c227f099SAnthony Liguori static void watch_mem_writel(void *opaque, target_phys_addr_t addr,
34646658ffb8Spbrook                              uint32_t val)
34656658ffb8Spbrook {
3466b4051334Saliguori     check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x3, BP_MEM_WRITE);
34676658ffb8Spbrook     stl_phys(addr, val);
34686658ffb8Spbrook }
34696658ffb8Spbrook 
3470d60efc6bSBlue Swirl static CPUReadMemoryFunc * const watch_mem_read[3] = {
34716658ffb8Spbrook     watch_mem_readb,
34726658ffb8Spbrook     watch_mem_readw,
34736658ffb8Spbrook     watch_mem_readl,
34746658ffb8Spbrook };
34756658ffb8Spbrook 
3476d60efc6bSBlue Swirl static CPUWriteMemoryFunc * const watch_mem_write[3] = {
34776658ffb8Spbrook     watch_mem_writeb,
34786658ffb8Spbrook     watch_mem_writew,
34796658ffb8Spbrook     watch_mem_writel,
34806658ffb8Spbrook };
34816658ffb8Spbrook 
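/* Illustrative sketch (not from the original source): arming one of these
 * watchpoints from a debugger front end.  Assumes the historical
 * cpu_watchpoint_insert() prototype from this era's cpu-all.h;
 * arm_write_watch() is a hypothetical name.
 */
static int arm_write_watch(CPUState *env, target_ulong vaddr)
{
    CPUWatchpoint *wp;

    /* a 4-byte write watchpoint; the TLB entry for the page is redirected
       to io_mem_watch so the handlers above see every access */
    return cpu_watchpoint_insert(env, vaddr, 4, BP_MEM_WRITE, &wp);
}
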
3482f6405247SRichard Henderson static inline uint32_t subpage_readlen (subpage_t *mmio,
3483f6405247SRichard Henderson                                         target_phys_addr_t addr,
3484db7b5426Sblueswir1                                         unsigned int len)
3485db7b5426Sblueswir1 {
3486f6405247SRichard Henderson     unsigned int idx = SUBPAGE_IDX(addr);
3487db7b5426Sblueswir1 #if defined(DEBUG_SUBPAGE)
3488db7b5426Sblueswir1     printf("%s: subpage %p len %d addr " TARGET_FMT_plx " idx %d\n", __func__,
3489db7b5426Sblueswir1            mmio, len, addr, idx);
3490db7b5426Sblueswir1 #endif
3491db7b5426Sblueswir1 
3492f6405247SRichard Henderson     addr += mmio->region_offset[idx];
3493f6405247SRichard Henderson     idx = mmio->sub_io_index[idx];
3494f6405247SRichard Henderson     return io_mem_read[idx][len](io_mem_opaque[idx], addr);
3495db7b5426Sblueswir1 }
3496db7b5426Sblueswir1 
3497c227f099SAnthony Liguori static inline void subpage_writelen (subpage_t *mmio, target_phys_addr_t addr,
3498db7b5426Sblueswir1                                      uint32_t value, unsigned int len)
3499db7b5426Sblueswir1 {
3500f6405247SRichard Henderson     unsigned int idx = SUBPAGE_IDX(addr);
3501db7b5426Sblueswir1 #if defined(DEBUG_SUBPAGE)
3502f6405247SRichard Henderson     printf("%s: subpage %p len %d addr " TARGET_FMT_plx " idx %d value %08x\n",
3503f6405247SRichard Henderson            __func__, mmio, len, addr, idx, value);
3504db7b5426Sblueswir1 #endif
3505f6405247SRichard Henderson 
3506f6405247SRichard Henderson     addr += mmio->region_offset[idx];
3507f6405247SRichard Henderson     idx = mmio->sub_io_index[idx];
3508f6405247SRichard Henderson     io_mem_write[idx][len](io_mem_opaque[idx], addr, value);
3509db7b5426Sblueswir1 }
3510db7b5426Sblueswir1 
3511c227f099SAnthony Liguori static uint32_t subpage_readb (void *opaque, target_phys_addr_t addr)
3512db7b5426Sblueswir1 {
3513db7b5426Sblueswir1     return subpage_readlen(opaque, addr, 0);
3514db7b5426Sblueswir1 }
3515db7b5426Sblueswir1 
3516c227f099SAnthony Liguori static void subpage_writeb (void *opaque, target_phys_addr_t addr,
3517db7b5426Sblueswir1                             uint32_t value)
3518db7b5426Sblueswir1 {
3519db7b5426Sblueswir1     subpage_writelen(opaque, addr, value, 0);
3520db7b5426Sblueswir1 }
3521db7b5426Sblueswir1 
3522c227f099SAnthony Liguori static uint32_t subpage_readw (void *opaque, target_phys_addr_t addr)
3523db7b5426Sblueswir1 {
3524db7b5426Sblueswir1     return subpage_readlen(opaque, addr, 1);
3525db7b5426Sblueswir1 }
3526db7b5426Sblueswir1 
3527c227f099SAnthony Liguori static void subpage_writew (void *opaque, target_phys_addr_t addr,
3528db7b5426Sblueswir1                             uint32_t value)
3529db7b5426Sblueswir1 {
3530db7b5426Sblueswir1     subpage_writelen(opaque, addr, value, 1);
3531db7b5426Sblueswir1 }
3532db7b5426Sblueswir1 
3533c227f099SAnthony Liguori static uint32_t subpage_readl (void *opaque, target_phys_addr_t addr)
3534db7b5426Sblueswir1 {
3535db7b5426Sblueswir1     return subpage_readlen(opaque, addr, 2);
3536db7b5426Sblueswir1 }
3537db7b5426Sblueswir1 
3538f6405247SRichard Henderson static void subpage_writel (void *opaque, target_phys_addr_t addr,
3539f6405247SRichard Henderson                             uint32_t value)
3540db7b5426Sblueswir1 {
3541db7b5426Sblueswir1     subpage_writelen(opaque, addr, value, 2);
3542db7b5426Sblueswir1 }
3543db7b5426Sblueswir1 
3544d60efc6bSBlue Swirl static CPUReadMemoryFunc * const subpage_read[] = {
3545db7b5426Sblueswir1     &subpage_readb,
3546db7b5426Sblueswir1     &subpage_readw,
3547db7b5426Sblueswir1     &subpage_readl,
3548db7b5426Sblueswir1 };
3549db7b5426Sblueswir1 
3550d60efc6bSBlue Swirl static CPUWriteMemoryFunc * const subpage_write[] = {
3551db7b5426Sblueswir1     &subpage_writeb,
3552db7b5426Sblueswir1     &subpage_writew,
3553db7b5426Sblueswir1     &subpage_writel,
3554db7b5426Sblueswir1 };
3555db7b5426Sblueswir1 
3556c227f099SAnthony Liguori static int subpage_register (subpage_t *mmio, uint32_t start, uint32_t end,
3557c227f099SAnthony Liguori                              ram_addr_t memory, ram_addr_t region_offset)
3558db7b5426Sblueswir1 {
3559db7b5426Sblueswir1     int idx, eidx;
3560db7b5426Sblueswir1 
3561db7b5426Sblueswir1     if (start >= TARGET_PAGE_SIZE || end >= TARGET_PAGE_SIZE)
3562db7b5426Sblueswir1         return -1;
3563db7b5426Sblueswir1     idx = SUBPAGE_IDX(start);
3564db7b5426Sblueswir1     eidx = SUBPAGE_IDX(end);
3565db7b5426Sblueswir1 #if defined(DEBUG_SUBPAGE)
35660bf9e31aSBlue Swirl     printf("%s: %p start %08x end %08x idx %08x eidx %08x mem %ld\n", __func__,
3567db7b5426Sblueswir1            mmio, start, end, idx, eidx, memory);
3568db7b5426Sblueswir1 #endif
356995c318f5SGleb Natapov     if ((memory & ~TARGET_PAGE_MASK) == IO_MEM_RAM)
357095c318f5SGleb Natapov         memory = IO_MEM_UNASSIGNED;
3571f6405247SRichard Henderson     memory = (memory >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
3572db7b5426Sblueswir1     for (; idx <= eidx; idx++) {
3573f6405247SRichard Henderson         mmio->sub_io_index[idx] = memory;
3574f6405247SRichard Henderson         mmio->region_offset[idx] = region_offset;
3575db7b5426Sblueswir1     }
3576db7b5426Sblueswir1 
3577db7b5426Sblueswir1     return 0;
3578db7b5426Sblueswir1 }
3579db7b5426Sblueswir1 
3580f6405247SRichard Henderson static subpage_t *subpage_init (target_phys_addr_t base, ram_addr_t *phys,
3581f6405247SRichard Henderson                                 ram_addr_t orig_memory,
3582f6405247SRichard Henderson                                 ram_addr_t region_offset)
3583db7b5426Sblueswir1 {
3584c227f099SAnthony Liguori     subpage_t *mmio;
3585db7b5426Sblueswir1     int subpage_memory;
3586db7b5426Sblueswir1 
3587c227f099SAnthony Liguori     mmio = qemu_mallocz(sizeof(subpage_t));
35881eec614bSaliguori 
3589db7b5426Sblueswir1     mmio->base = base;
35902507c12aSAlexander Graf     subpage_memory = cpu_register_io_memory(subpage_read, subpage_write, mmio,
35912507c12aSAlexander Graf                                             DEVICE_NATIVE_ENDIAN);
3592db7b5426Sblueswir1 #if defined(DEBUG_SUBPAGE)
3593db7b5426Sblueswir1     printf("%s: %p base " TARGET_FMT_plx " len %08x %d\n", __func__,
3594db7b5426Sblueswir1            mmio, base, TARGET_PAGE_SIZE, subpage_memory);
3595db7b5426Sblueswir1 #endif
3596db7b5426Sblueswir1     *phys = subpage_memory | IO_MEM_SUBPAGE;
3597f6405247SRichard Henderson     subpage_register(mmio, 0, TARGET_PAGE_SIZE-1, orig_memory, region_offset);
3598db7b5426Sblueswir1 
3599db7b5426Sblueswir1     return mmio;
3600db7b5426Sblueswir1 }
3601db7b5426Sblueswir1 
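/* Illustrative sketch (not from the original source): subpage_init() and
 * subpage_register() are static helpers normally driven by
 * cpu_register_physical_memory() when a mapping covers less than a target
 * page; shown standalone only to make the dispatch visible.  my_io_mem is
 * assumed to be a token returned by cpu_register_io_memory().
 */
static void subpage_example(target_phys_addr_t base, int my_io_mem)
{
    ram_addr_t phys;
    subpage_t *sp = subpage_init(base, &phys, IO_MEM_UNASSIGNED, 0);

    /* bytes 0x100..0x1ff of the page now dispatch to my_io_mem's handlers;
       the rest of the page stays unassigned */
    subpage_register(sp, 0x100, 0x1ff, my_io_mem, 0);
}
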
360288715657Saliguori static int get_free_io_mem_idx(void)
360388715657Saliguori {
360488715657Saliguori     int i;
360588715657Saliguori 
360688715657Saliguori     for (i = 0; i<IO_MEM_NB_ENTRIES; i++)
360788715657Saliguori         if (!io_mem_used[i]) {
360888715657Saliguori             io_mem_used[i] = 1;
360988715657Saliguori             return i;
361088715657Saliguori         }
3611c6703b47SRiku Voipio     fprintf(stderr, "Ran out of io_mem_idx, max %d!\n", IO_MEM_NB_ENTRIES);
361288715657Saliguori     return -1;
361388715657Saliguori }
361488715657Saliguori 
3615dd310534SAlexander Graf /*
3616dd310534SAlexander Graf  * Usually, devices operate in little-endian mode, but there are devices
3617dd310534SAlexander Graf  * that operate in big-endian too. Each device gets byte-swapped MMIO if
3618dd310534SAlexander Graf  * it is plugged onto a CPU of the other endianness.
3619dd310534SAlexander Graf  *
3620dd310534SAlexander Graf  * CPU          Device           swap?
3621dd310534SAlexander Graf  *
3622dd310534SAlexander Graf  * little       little           no
3623dd310534SAlexander Graf  * little       big              yes
3624dd310534SAlexander Graf  * big          little           yes
3625dd310534SAlexander Graf  * big          big              no
3626dd310534SAlexander Graf  */
3627dd310534SAlexander Graf 
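/* Illustrative sketch (not from the original source): the table above is
 * just an XOR of the two endiannesses; swapendian_init() below is applied
 * exactly when this hypothetical predicate would return true.
 */
static inline int mmio_needs_bswap(int cpu_big_endian, int device_big_endian)
{
    return cpu_big_endian != device_big_endian;
}
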
3628dd310534SAlexander Graf typedef struct SwapEndianContainer {
3629dd310534SAlexander Graf     CPUReadMemoryFunc *read[3];
3630dd310534SAlexander Graf     CPUWriteMemoryFunc *write[3];
3631dd310534SAlexander Graf     void *opaque;
3632dd310534SAlexander Graf } SwapEndianContainer;
3633dd310534SAlexander Graf 
3634dd310534SAlexander Graf static uint32_t swapendian_mem_readb (void *opaque, target_phys_addr_t addr)
3635dd310534SAlexander Graf {
3636dd310534SAlexander Graf     uint32_t val;
3637dd310534SAlexander Graf     SwapEndianContainer *c = opaque;
3638dd310534SAlexander Graf     val = c->read[0](c->opaque, addr);
3639dd310534SAlexander Graf     return val;
3640dd310534SAlexander Graf }
3641dd310534SAlexander Graf 
3642dd310534SAlexander Graf static uint32_t swapendian_mem_readw(void *opaque, target_phys_addr_t addr)
3643dd310534SAlexander Graf {
3644dd310534SAlexander Graf     uint32_t val;
3645dd310534SAlexander Graf     SwapEndianContainer *c = opaque;
3646dd310534SAlexander Graf     val = bswap16(c->read[1](c->opaque, addr));
3647dd310534SAlexander Graf     return val;
3648dd310534SAlexander Graf }
3649dd310534SAlexander Graf 
3650dd310534SAlexander Graf static uint32_t swapendian_mem_readl(void *opaque, target_phys_addr_t addr)
3651dd310534SAlexander Graf {
3652dd310534SAlexander Graf     uint32_t val;
3653dd310534SAlexander Graf     SwapEndianContainer *c = opaque;
3654dd310534SAlexander Graf     val = bswap32(c->read[2](c->opaque, addr));
3655dd310534SAlexander Graf     return val;
3656dd310534SAlexander Graf }
3657dd310534SAlexander Graf 
3658dd310534SAlexander Graf static CPUReadMemoryFunc * const swapendian_readfn[3]={
3659dd310534SAlexander Graf     swapendian_mem_readb,
3660dd310534SAlexander Graf     swapendian_mem_readw,
3661dd310534SAlexander Graf     swapendian_mem_readl
3662dd310534SAlexander Graf };
3663dd310534SAlexander Graf 
3664dd310534SAlexander Graf static void swapendian_mem_writeb(void *opaque, target_phys_addr_t addr,
3665dd310534SAlexander Graf                                   uint32_t val)
3666dd310534SAlexander Graf {
3667dd310534SAlexander Graf     SwapEndianContainer *c = opaque;
3668dd310534SAlexander Graf     c->write[0](c->opaque, addr, val);
3669dd310534SAlexander Graf }
3670dd310534SAlexander Graf 
3671dd310534SAlexander Graf static void swapendian_mem_writew(void *opaque, target_phys_addr_t addr,
3672dd310534SAlexander Graf                                   uint32_t val)
3673dd310534SAlexander Graf {
3674dd310534SAlexander Graf     SwapEndianContainer *c = opaque;
3675dd310534SAlexander Graf     c->write[1](c->opaque, addr, bswap16(val));
3676dd310534SAlexander Graf }
3677dd310534SAlexander Graf 
3678dd310534SAlexander Graf static void swapendian_mem_writel(void *opaque, target_phys_addr_t addr,
3679dd310534SAlexander Graf                                   uint32_t val)
3680dd310534SAlexander Graf {
3681dd310534SAlexander Graf     SwapEndianContainer *c = opaque;
3682dd310534SAlexander Graf     c->write[2](c->opaque, addr, bswap32(val));
3683dd310534SAlexander Graf }
3684dd310534SAlexander Graf 
3685dd310534SAlexander Graf static CPUWriteMemoryFunc * const swapendian_writefn[3]={
3686dd310534SAlexander Graf     swapendian_mem_writeb,
3687dd310534SAlexander Graf     swapendian_mem_writew,
3688dd310534SAlexander Graf     swapendian_mem_writel
3689dd310534SAlexander Graf };
3690dd310534SAlexander Graf 
3691dd310534SAlexander Graf static void swapendian_init(int io_index)
3692dd310534SAlexander Graf {
3693dd310534SAlexander Graf     SwapEndianContainer *c = qemu_malloc(sizeof(SwapEndianContainer));
3694dd310534SAlexander Graf     int i;
3695dd310534SAlexander Graf 
3696dd310534SAlexander Graf     /* Swap mmio for big endian targets */
3697dd310534SAlexander Graf     c->opaque = io_mem_opaque[io_index];
3698dd310534SAlexander Graf     for (i = 0; i < 3; i++) {
3699dd310534SAlexander Graf         c->read[i] = io_mem_read[io_index][i];
3700dd310534SAlexander Graf         c->write[i] = io_mem_write[io_index][i];
3701dd310534SAlexander Graf 
3702dd310534SAlexander Graf         io_mem_read[io_index][i] = swapendian_readfn[i];
3703dd310534SAlexander Graf         io_mem_write[io_index][i] = swapendian_writefn[i];
3704dd310534SAlexander Graf     }
3705dd310534SAlexander Graf     io_mem_opaque[io_index] = c;
3706dd310534SAlexander Graf }
3707dd310534SAlexander Graf 
3708dd310534SAlexander Graf static void swapendian_del(int io_index)
3709dd310534SAlexander Graf {
3710dd310534SAlexander Graf     if (io_mem_read[io_index][0] == swapendian_readfn[0]) {
3711dd310534SAlexander Graf         qemu_free(io_mem_opaque[io_index]);
3712dd310534SAlexander Graf     }
3713dd310534SAlexander Graf }
3714dd310534SAlexander Graf 
371533417e70Sbellard /* mem_read and mem_write are arrays of functions for byte (index 0),
371633417e70Sbellard    word (index 1) and dword (index 2) accesses. A NULL function pointer
37170b4e6e3eSPaul Brook    makes that access width fall back to the unassigned handlers.
37183ee89922Sblueswir1    If io_index is non-zero, the corresponding I/O zone is
37194254fab8Sblueswir1    modified; if it is zero, a new I/O zone is allocated. The return
37204254fab8Sblueswir1    value can be used with cpu_register_physical_memory(); -1 is
37214254fab8Sblueswir1    returned on error. */
37221eed09cbSAvi Kivity static int cpu_register_io_memory_fixed(int io_index,
3723d60efc6bSBlue Swirl                                         CPUReadMemoryFunc * const *mem_read,
3724d60efc6bSBlue Swirl                                         CPUWriteMemoryFunc * const *mem_write,
3725dd310534SAlexander Graf                                         void *opaque, enum device_endian endian)
372633417e70Sbellard {
37273cab721dSRichard Henderson     int i;
37283cab721dSRichard Henderson 
372933417e70Sbellard     if (io_index <= 0) {
373088715657Saliguori         io_index = get_free_io_mem_idx();
373188715657Saliguori         if (io_index == -1)
373288715657Saliguori             return io_index;
373333417e70Sbellard     } else {
37341eed09cbSAvi Kivity         io_index >>= IO_MEM_SHIFT;
373533417e70Sbellard         if (io_index >= IO_MEM_NB_ENTRIES)
373633417e70Sbellard             return -1;
373733417e70Sbellard     }
373833417e70Sbellard 
37393cab721dSRichard Henderson     for (i = 0; i < 3; ++i) {
37403cab721dSRichard Henderson         io_mem_read[io_index][i]
37413cab721dSRichard Henderson             = (mem_read[i] ? mem_read[i] : unassigned_mem_read[i]);
37423cab721dSRichard Henderson     }
37433cab721dSRichard Henderson     for (i = 0; i < 3; ++i) {
37443cab721dSRichard Henderson         io_mem_write[io_index][i]
37453cab721dSRichard Henderson             = (mem_write[i] ? mem_write[i] : unassigned_mem_write[i]);
37463cab721dSRichard Henderson     }
3747a4193c8aSbellard     io_mem_opaque[io_index] = opaque;
3748f6405247SRichard Henderson 
3749dd310534SAlexander Graf     switch (endian) {
3750dd310534SAlexander Graf     case DEVICE_BIG_ENDIAN:
3751dd310534SAlexander Graf #ifndef TARGET_WORDS_BIGENDIAN
3752dd310534SAlexander Graf         swapendian_init(io_index);
3753dd310534SAlexander Graf #endif
3754dd310534SAlexander Graf         break;
3755dd310534SAlexander Graf     case DEVICE_LITTLE_ENDIAN:
3756dd310534SAlexander Graf #ifdef TARGET_WORDS_BIGENDIAN
3757dd310534SAlexander Graf         swapendian_init(io_index);
3758dd310534SAlexander Graf #endif
3759dd310534SAlexander Graf         break;
3760dd310534SAlexander Graf     case DEVICE_NATIVE_ENDIAN:
3761dd310534SAlexander Graf     default:
3762dd310534SAlexander Graf         break;
3763dd310534SAlexander Graf     }
3764dd310534SAlexander Graf 
3765f6405247SRichard Henderson     return (io_index << IO_MEM_SHIFT);
376633417e70Sbellard }
376761382a50Sbellard 
3768d60efc6bSBlue Swirl int cpu_register_io_memory(CPUReadMemoryFunc * const *mem_read,
3769d60efc6bSBlue Swirl                            CPUWriteMemoryFunc * const *mem_write,
3770dd310534SAlexander Graf                            void *opaque, enum device_endian endian)
37711eed09cbSAvi Kivity {
37722507c12aSAlexander Graf     return cpu_register_io_memory_fixed(0, mem_read, mem_write, opaque, endian);
37731eed09cbSAvi Kivity }
37741eed09cbSAvi Kivity 
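/* Illustrative sketch (not from the original source): typical device-side
 * use of cpu_register_io_memory().  MyState and the my_* names are
 * hypothetical; cpu_register_physical_memory() is assumed to have its
 * historical (start, size, phys_offset) form.
 */
typedef struct MyState MyState;

static uint32_t my_readl(void *opaque, target_phys_addr_t addr)
{
    return 0;                    /* decode device registers here */
}

static void my_writel(void *opaque, target_phys_addr_t addr, uint32_t val)
{
    /* latch register writes here */
}

/* byte and word slots left NULL: they fall back to the unassigned ops */
static CPUReadMemoryFunc * const my_read[3] = { NULL, NULL, my_readl };
static CPUWriteMemoryFunc * const my_write[3] = { NULL, NULL, my_writel };

static void my_device_map(MyState *s, target_phys_addr_t base)
{
    int io = cpu_register_io_memory(my_read, my_write, s,
                                    DEVICE_LITTLE_ENDIAN);
    cpu_register_physical_memory(base, TARGET_PAGE_SIZE, io);
}
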
377588715657Saliguori void cpu_unregister_io_memory(int io_table_address)
377688715657Saliguori {
377788715657Saliguori     int i;
377888715657Saliguori     int io_index = io_table_address >> IO_MEM_SHIFT;
377988715657Saliguori 
3780dd310534SAlexander Graf     swapendian_del(io_index);
3781dd310534SAlexander Graf 
378288715657Saliguori     for (i=0;i < 3; i++) {
378388715657Saliguori         io_mem_read[io_index][i] = unassigned_mem_read[i];
378488715657Saliguori         io_mem_write[io_index][i] = unassigned_mem_write[i];
378588715657Saliguori     }
378688715657Saliguori     io_mem_opaque[io_index] = NULL;
378788715657Saliguori     io_mem_used[io_index] = 0;
378888715657Saliguori }
378988715657Saliguori 
3790e9179ce1SAvi Kivity static void io_mem_init(void)
3791e9179ce1SAvi Kivity {
3792e9179ce1SAvi Kivity     int i;
3793e9179ce1SAvi Kivity 
37942507c12aSAlexander Graf     cpu_register_io_memory_fixed(IO_MEM_ROM, error_mem_read,
37952507c12aSAlexander Graf                                  unassigned_mem_write, NULL,
37962507c12aSAlexander Graf                                  DEVICE_NATIVE_ENDIAN);
37972507c12aSAlexander Graf     cpu_register_io_memory_fixed(IO_MEM_UNASSIGNED, unassigned_mem_read,
37982507c12aSAlexander Graf                                  unassigned_mem_write, NULL,
37992507c12aSAlexander Graf                                  DEVICE_NATIVE_ENDIAN);
38002507c12aSAlexander Graf     cpu_register_io_memory_fixed(IO_MEM_NOTDIRTY, error_mem_read,
38012507c12aSAlexander Graf                                  notdirty_mem_write, NULL,
38022507c12aSAlexander Graf                                  DEVICE_NATIVE_ENDIAN);
3803e9179ce1SAvi Kivity     for (i=0; i<5; i++)
3804e9179ce1SAvi Kivity         io_mem_used[i] = 1;
3805e9179ce1SAvi Kivity 
3806e9179ce1SAvi Kivity     io_mem_watch = cpu_register_io_memory(watch_mem_read,
38072507c12aSAlexander Graf                                           watch_mem_write, NULL,
38082507c12aSAlexander Graf                                           DEVICE_NATIVE_ENDIAN);
3809e9179ce1SAvi Kivity }
3810e9179ce1SAvi Kivity 
3811e2eef170Spbrook #endif /* !defined(CONFIG_USER_ONLY) */
3812e2eef170Spbrook 
381313eb76e0Sbellard /* physical memory access (slow version, mainly for debug) */
381413eb76e0Sbellard #if defined(CONFIG_USER_ONLY)
3815a68fe89cSPaul Brook int cpu_memory_rw_debug(CPUState *env, target_ulong addr,
3816a68fe89cSPaul Brook                         uint8_t *buf, int len, int is_write)
381713eb76e0Sbellard {
381813eb76e0Sbellard     int l, flags;
381913eb76e0Sbellard     target_ulong page;
382053a5960aSpbrook     void * p;
382113eb76e0Sbellard 
382213eb76e0Sbellard     while (len > 0) {
382313eb76e0Sbellard         page = addr & TARGET_PAGE_MASK;
382413eb76e0Sbellard         l = (page + TARGET_PAGE_SIZE) - addr;
382513eb76e0Sbellard         if (l > len)
382613eb76e0Sbellard             l = len;
382713eb76e0Sbellard         flags = page_get_flags(page);
382813eb76e0Sbellard         if (!(flags & PAGE_VALID))
3829a68fe89cSPaul Brook             return -1;
383013eb76e0Sbellard         if (is_write) {
383113eb76e0Sbellard             if (!(flags & PAGE_WRITE))
3832a68fe89cSPaul Brook                 return -1;
3833579a97f7Sbellard             /* XXX: this code should not depend on lock_user */
383472fb7daaSaurel32             if (!(p = lock_user(VERIFY_WRITE, addr, l, 0)))
3835a68fe89cSPaul Brook                 return -1;
383672fb7daaSaurel32             memcpy(p, buf, l);
383772fb7daaSaurel32             unlock_user(p, addr, l);
383813eb76e0Sbellard         } else {
383913eb76e0Sbellard             if (!(flags & PAGE_READ))
3840a68fe89cSPaul Brook                 return -1;
3841579a97f7Sbellard             /* XXX: this code should not depend on lock_user */
384272fb7daaSaurel32             if (!(p = lock_user(VERIFY_READ, addr, l, 1)))
3843a68fe89cSPaul Brook                 return -1;
384472fb7daaSaurel32             memcpy(buf, p, l);
38455b257578Saurel32             unlock_user(p, addr, 0);
384613eb76e0Sbellard         }
384713eb76e0Sbellard         len -= l;
384813eb76e0Sbellard         buf += l;
384913eb76e0Sbellard         addr += l;
385013eb76e0Sbellard     }
3851a68fe89cSPaul Brook     return 0;
385213eb76e0Sbellard }
38538df1cd07Sbellard 
385413eb76e0Sbellard #else
3855c227f099SAnthony Liguori void cpu_physical_memory_rw(target_phys_addr_t addr, uint8_t *buf,
385613eb76e0Sbellard                             int len, int is_write)
385713eb76e0Sbellard {
385813eb76e0Sbellard     int l, io_index;
385913eb76e0Sbellard     uint8_t *ptr;
386013eb76e0Sbellard     uint32_t val;
3861c227f099SAnthony Liguori     target_phys_addr_t page;
38628ca5692dSAnthony PERARD     ram_addr_t pd;
386392e873b9Sbellard     PhysPageDesc *p;
386413eb76e0Sbellard 
386513eb76e0Sbellard     while (len > 0) {
386613eb76e0Sbellard         page = addr & TARGET_PAGE_MASK;
386713eb76e0Sbellard         l = (page + TARGET_PAGE_SIZE) - addr;
386813eb76e0Sbellard         if (l > len)
386913eb76e0Sbellard             l = len;
387092e873b9Sbellard         p = phys_page_find(page >> TARGET_PAGE_BITS);
387113eb76e0Sbellard         if (!p) {
387213eb76e0Sbellard             pd = IO_MEM_UNASSIGNED;
387313eb76e0Sbellard         } else {
387413eb76e0Sbellard             pd = p->phys_offset;
387513eb76e0Sbellard         }
387613eb76e0Sbellard 
387713eb76e0Sbellard         if (is_write) {
38783a7d929eSbellard             if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
3879c227f099SAnthony Liguori                 target_phys_addr_t addr1 = addr;
388013eb76e0Sbellard                 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
38818da3ff18Spbrook                 if (p)
38826c2934dbSaurel32                     addr1 = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
38836a00d601Sbellard                 /* XXX: could force cpu_single_env to NULL to avoid
38846a00d601Sbellard                    potential bugs */
38856c2934dbSaurel32                 if (l >= 4 && ((addr1 & 3) == 0)) {
38861c213d19Sbellard                     /* 32 bit write access */
3887c27004ecSbellard                     val = ldl_p(buf);
38886c2934dbSaurel32                     io_mem_write[io_index][2](io_mem_opaque[io_index], addr1, val);
388913eb76e0Sbellard                     l = 4;
38906c2934dbSaurel32                 } else if (l >= 2 && ((addr1 & 1) == 0)) {
38911c213d19Sbellard                     /* 16 bit write access */
3892c27004ecSbellard                     val = lduw_p(buf);
38936c2934dbSaurel32                     io_mem_write[io_index][1](io_mem_opaque[io_index], addr1, val);
389413eb76e0Sbellard                     l = 2;
389513eb76e0Sbellard                 } else {
38961c213d19Sbellard                     /* 8 bit write access */
3897c27004ecSbellard                     val = ldub_p(buf);
38986c2934dbSaurel32                     io_mem_write[io_index][0](io_mem_opaque[io_index], addr1, val);
389913eb76e0Sbellard                     l = 1;
390013eb76e0Sbellard                 }
390113eb76e0Sbellard             } else {
39028ca5692dSAnthony PERARD                 ram_addr_t addr1;
3903b448f2f3Sbellard                 addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
390413eb76e0Sbellard                 /* RAM case */
39055579c7f3Spbrook                 ptr = qemu_get_ram_ptr(addr1);
390613eb76e0Sbellard                 memcpy(ptr, buf, l);
39073a7d929eSbellard                 if (!cpu_physical_memory_is_dirty(addr1)) {
3908b448f2f3Sbellard                     /* invalidate code */
3909b448f2f3Sbellard                     tb_invalidate_phys_page_range(addr1, addr1 + l, 0);
3910b448f2f3Sbellard                     /* set dirty bit */
3911f7c11b53SYoshiaki Tamura                     cpu_physical_memory_set_dirty_flags(
3912f7c11b53SYoshiaki Tamura                         addr1, (0xff & ~CODE_DIRTY_FLAG));
391313eb76e0Sbellard                 }
3914050a0ddfSAnthony PERARD                 qemu_put_ram_ptr(ptr);
39153a7d929eSbellard             }
391613eb76e0Sbellard         } else {
39172a4188a3Sbellard             if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
39182a4188a3Sbellard                 !(pd & IO_MEM_ROMD)) {
3919c227f099SAnthony Liguori                 target_phys_addr_t addr1 = addr;
392013eb76e0Sbellard                 /* I/O case */
392113eb76e0Sbellard                 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
39228da3ff18Spbrook                 if (p)
39236c2934dbSaurel32                     addr1 = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
39246c2934dbSaurel32                 if (l >= 4 && ((addr1 & 3) == 0)) {
392513eb76e0Sbellard                     /* 32 bit read access */
39266c2934dbSaurel32                     val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr1);
3927c27004ecSbellard                     stl_p(buf, val);
392813eb76e0Sbellard                     l = 4;
39296c2934dbSaurel32                 } else if (l >= 2 && ((addr1 & 1) == 0)) {
393013eb76e0Sbellard                     /* 16 bit read access */
39316c2934dbSaurel32                     val = io_mem_read[io_index][1](io_mem_opaque[io_index], addr1);
3932c27004ecSbellard                     stw_p(buf, val);
393313eb76e0Sbellard                     l = 2;
393413eb76e0Sbellard                 } else {
39351c213d19Sbellard                     /* 8 bit read access */
39366c2934dbSaurel32                     val = io_mem_read[io_index][0](io_mem_opaque[io_index], addr1);
3937c27004ecSbellard                     stb_p(buf, val);
393813eb76e0Sbellard                     l = 1;
393913eb76e0Sbellard                 }
394013eb76e0Sbellard             } else {
394113eb76e0Sbellard                 /* RAM case */
3942050a0ddfSAnthony PERARD                 ptr = qemu_get_ram_ptr(pd & TARGET_PAGE_MASK);
3943050a0ddfSAnthony PERARD                 memcpy(buf, ptr + (addr & ~TARGET_PAGE_MASK), l);
3944050a0ddfSAnthony PERARD                 qemu_put_ram_ptr(ptr);
394513eb76e0Sbellard             }
394613eb76e0Sbellard         }
394713eb76e0Sbellard         len -= l;
394813eb76e0Sbellard         buf += l;
394913eb76e0Sbellard         addr += l;
395013eb76e0Sbellard     }
395113eb76e0Sbellard }
39528df1cd07Sbellard 
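/* Illustrative sketch (not from the original source): device models usually
 * reach the function above through the cpu_physical_memory_read()/write()
 * wrappers.  fetch_guest_descriptor() and the 8-byte layout are hypothetical.
 */
static uint32_t fetch_guest_descriptor(target_phys_addr_t desc_addr)
{
    uint8_t buf[8];

    cpu_physical_memory_read(desc_addr, buf, sizeof(buf)); /* guest -> host */
    return ldl_p(buf);           /* first 32-bit field, target byte order */
}
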
3953d0ecd2aaSbellard /* used for ROM loading : can write in RAM and ROM */
3954c227f099SAnthony Liguori void cpu_physical_memory_write_rom(target_phys_addr_t addr,
3955d0ecd2aaSbellard                                    const uint8_t *buf, int len)
3956d0ecd2aaSbellard {
3957d0ecd2aaSbellard     int l;
3958d0ecd2aaSbellard     uint8_t *ptr;
3959c227f099SAnthony Liguori     target_phys_addr_t page;
3960d0ecd2aaSbellard     unsigned long pd;
3961d0ecd2aaSbellard     PhysPageDesc *p;
3962d0ecd2aaSbellard 
3963d0ecd2aaSbellard     while (len > 0) {
3964d0ecd2aaSbellard         page = addr & TARGET_PAGE_MASK;
3965d0ecd2aaSbellard         l = (page + TARGET_PAGE_SIZE) - addr;
3966d0ecd2aaSbellard         if (l > len)
3967d0ecd2aaSbellard             l = len;
3968d0ecd2aaSbellard         p = phys_page_find(page >> TARGET_PAGE_BITS);
3969d0ecd2aaSbellard         if (!p) {
3970d0ecd2aaSbellard             pd = IO_MEM_UNASSIGNED;
3971d0ecd2aaSbellard         } else {
3972d0ecd2aaSbellard             pd = p->phys_offset;
3973d0ecd2aaSbellard         }
3974d0ecd2aaSbellard 
3975d0ecd2aaSbellard         if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM &&
39762a4188a3Sbellard             (pd & ~TARGET_PAGE_MASK) != IO_MEM_ROM &&
39772a4188a3Sbellard             !(pd & IO_MEM_ROMD)) {
3978d0ecd2aaSbellard             /* do nothing */
3979d0ecd2aaSbellard         } else {
3980d0ecd2aaSbellard             unsigned long addr1;
3981d0ecd2aaSbellard             addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
3982d0ecd2aaSbellard             /* ROM/RAM case */
39835579c7f3Spbrook             ptr = qemu_get_ram_ptr(addr1);
3984d0ecd2aaSbellard             memcpy(ptr, buf, l);
3985050a0ddfSAnthony PERARD             qemu_put_ram_ptr(ptr);
3986d0ecd2aaSbellard         }
3987d0ecd2aaSbellard         len -= l;
3988d0ecd2aaSbellard         buf += l;
3989d0ecd2aaSbellard         addr += l;
3990d0ecd2aaSbellard     }
3991d0ecd2aaSbellard }
3992d0ecd2aaSbellard 
39936d16c2f8Saliguori typedef struct {
39946d16c2f8Saliguori     void *buffer;
3995c227f099SAnthony Liguori     target_phys_addr_t addr;
3996c227f099SAnthony Liguori     target_phys_addr_t len;
39976d16c2f8Saliguori } BounceBuffer;
39986d16c2f8Saliguori 
39996d16c2f8Saliguori static BounceBuffer bounce;
40006d16c2f8Saliguori 
4001ba223c29Saliguori typedef struct MapClient {
4002ba223c29Saliguori     void *opaque;
4003ba223c29Saliguori     void (*callback)(void *opaque);
400472cf2d4fSBlue Swirl     QLIST_ENTRY(MapClient) link;
4005ba223c29Saliguori } MapClient;
4006ba223c29Saliguori 
400772cf2d4fSBlue Swirl static QLIST_HEAD(map_client_list, MapClient) map_client_list
400872cf2d4fSBlue Swirl     = QLIST_HEAD_INITIALIZER(map_client_list);
4009ba223c29Saliguori 
4010ba223c29Saliguori void *cpu_register_map_client(void *opaque, void (*callback)(void *opaque))
4011ba223c29Saliguori {
4012ba223c29Saliguori     MapClient *client = qemu_malloc(sizeof(*client));
4013ba223c29Saliguori 
4014ba223c29Saliguori     client->opaque = opaque;
4015ba223c29Saliguori     client->callback = callback;
401672cf2d4fSBlue Swirl     QLIST_INSERT_HEAD(&map_client_list, client, link);
4017ba223c29Saliguori     return client;
4018ba223c29Saliguori }
4019ba223c29Saliguori 
4020ba223c29Saliguori void cpu_unregister_map_client(void *_client)
4021ba223c29Saliguori {
4022ba223c29Saliguori     MapClient *client = (MapClient *)_client;
4023ba223c29Saliguori 
402472cf2d4fSBlue Swirl     QLIST_REMOVE(client, link);
402534d5e948SIsaku Yamahata     qemu_free(client);
4026ba223c29Saliguori }
4027ba223c29Saliguori 
4028ba223c29Saliguori static void cpu_notify_map_clients(void)
4029ba223c29Saliguori {
4030ba223c29Saliguori     MapClient *client;
4031ba223c29Saliguori 
403272cf2d4fSBlue Swirl     while (!QLIST_EMPTY(&map_client_list)) {
403372cf2d4fSBlue Swirl         client = QLIST_FIRST(&map_client_list);
4034ba223c29Saliguori         client->callback(client->opaque);
403534d5e948SIsaku Yamahata         cpu_unregister_map_client(client);
4036ba223c29Saliguori     }
4037ba223c29Saliguori }
4038ba223c29Saliguori 
40396d16c2f8Saliguori /* Map a physical memory region into a host virtual address.
40406d16c2f8Saliguori  * May map a subset of the requested range, given by and returned in *plen.
40416d16c2f8Saliguori  * May return NULL if resources needed to perform the mapping are exhausted.
40426d16c2f8Saliguori  * Use only for reads OR writes - not for read-modify-write operations.
4043ba223c29Saliguori  * Use cpu_register_map_client() to know when retrying the map operation is
4044ba223c29Saliguori  * likely to succeed.
40456d16c2f8Saliguori  */
4046c227f099SAnthony Liguori void *cpu_physical_memory_map(target_phys_addr_t addr,
4047c227f099SAnthony Liguori                               target_phys_addr_t *plen,
40486d16c2f8Saliguori                               int is_write)
40496d16c2f8Saliguori {
4050c227f099SAnthony Liguori     target_phys_addr_t len = *plen;
405138bee5dcSStefano Stabellini     target_phys_addr_t todo = 0;
40526d16c2f8Saliguori     int l;
4053c227f099SAnthony Liguori     target_phys_addr_t page;
40546d16c2f8Saliguori     unsigned long pd;
40556d16c2f8Saliguori     PhysPageDesc *p;
4056f15fbc4bSAnthony PERARD     ram_addr_t raddr = RAM_ADDR_MAX;
40578ab934f9SStefano Stabellini     ram_addr_t rlen;
40588ab934f9SStefano Stabellini     void *ret;
40596d16c2f8Saliguori 
40606d16c2f8Saliguori     while (len > 0) {
40616d16c2f8Saliguori         page = addr & TARGET_PAGE_MASK;
40626d16c2f8Saliguori         l = (page + TARGET_PAGE_SIZE) - addr;
40636d16c2f8Saliguori         if (l > len)
40646d16c2f8Saliguori             l = len;
40656d16c2f8Saliguori         p = phys_page_find(page >> TARGET_PAGE_BITS);
40666d16c2f8Saliguori         if (!p) {
40676d16c2f8Saliguori             pd = IO_MEM_UNASSIGNED;
40686d16c2f8Saliguori         } else {
40696d16c2f8Saliguori             pd = p->phys_offset;
40706d16c2f8Saliguori         }
40716d16c2f8Saliguori 
40726d16c2f8Saliguori         if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
407338bee5dcSStefano Stabellini             if (todo || bounce.buffer) {
40746d16c2f8Saliguori                 break;
40756d16c2f8Saliguori             }
40766d16c2f8Saliguori             bounce.buffer = qemu_memalign(TARGET_PAGE_SIZE, TARGET_PAGE_SIZE);
40776d16c2f8Saliguori             bounce.addr = addr;
40786d16c2f8Saliguori             bounce.len = l;
40796d16c2f8Saliguori             if (!is_write) {
408054f7b4a3SStefan Weil                 cpu_physical_memory_read(addr, bounce.buffer, l);
40816d16c2f8Saliguori             }
408238bee5dcSStefano Stabellini 
408338bee5dcSStefano Stabellini             *plen = l;
408438bee5dcSStefano Stabellini             return bounce.buffer;
40856d16c2f8Saliguori         }
40868ab934f9SStefano Stabellini         if (!todo) {
40878ab934f9SStefano Stabellini             raddr = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
40888ab934f9SStefano Stabellini         }
40896d16c2f8Saliguori 
40906d16c2f8Saliguori         len -= l;
40916d16c2f8Saliguori         addr += l;
409238bee5dcSStefano Stabellini         todo += l;
40936d16c2f8Saliguori     }
40948ab934f9SStefano Stabellini     rlen = todo;
40958ab934f9SStefano Stabellini     ret = qemu_ram_ptr_length(raddr, &rlen);
40968ab934f9SStefano Stabellini     *plen = rlen;
40978ab934f9SStefano Stabellini     return ret;
40986d16c2f8Saliguori }
40996d16c2f8Saliguori 
41006d16c2f8Saliguori /* Unmaps a memory region previously mapped by cpu_physical_memory_map().
41016d16c2f8Saliguori  * Will also mark the memory as dirty if is_write == 1.  access_len gives
41026d16c2f8Saliguori  * the amount of memory that was actually read or written by the caller.
41036d16c2f8Saliguori  */
4104c227f099SAnthony Liguori void cpu_physical_memory_unmap(void *buffer, target_phys_addr_t len,
4105c227f099SAnthony Liguori                                int is_write, target_phys_addr_t access_len)
41066d16c2f8Saliguori {
41076d16c2f8Saliguori     if (buffer != bounce.buffer) {
41086d16c2f8Saliguori         if (is_write) {
4109e890261fSMarcelo Tosatti             ram_addr_t addr1 = qemu_ram_addr_from_host_nofail(buffer);
41106d16c2f8Saliguori             while (access_len) {
41116d16c2f8Saliguori                 unsigned l;
41126d16c2f8Saliguori                 l = TARGET_PAGE_SIZE;
41136d16c2f8Saliguori                 if (l > access_len)
41146d16c2f8Saliguori                     l = access_len;
41156d16c2f8Saliguori                 if (!cpu_physical_memory_is_dirty(addr1)) {
41166d16c2f8Saliguori                     /* invalidate code */
41176d16c2f8Saliguori                     tb_invalidate_phys_page_range(addr1, addr1 + l, 0);
41186d16c2f8Saliguori                     /* set dirty bit */
4119f7c11b53SYoshiaki Tamura                     cpu_physical_memory_set_dirty_flags(
4120f7c11b53SYoshiaki Tamura                         addr1, (0xff & ~CODE_DIRTY_FLAG));
41216d16c2f8Saliguori                 }
41226d16c2f8Saliguori                 addr1 += l;
41236d16c2f8Saliguori                 access_len -= l;
41246d16c2f8Saliguori             }
41256d16c2f8Saliguori         }
4126868bb33fSJan Kiszka         if (xen_enabled()) {
4127e41d7c69SJan Kiszka             xen_invalidate_map_cache_entry(buffer);
4128050a0ddfSAnthony PERARD         }
41296d16c2f8Saliguori         return;
41306d16c2f8Saliguori     }
41316d16c2f8Saliguori     if (is_write) {
41326d16c2f8Saliguori         cpu_physical_memory_write(bounce.addr, bounce.buffer, access_len);
41336d16c2f8Saliguori     }
4134f8a83245SHerve Poussineau     qemu_vfree(bounce.buffer);
41356d16c2f8Saliguori     bounce.buffer = NULL;
4136ba223c29Saliguori     cpu_notify_map_clients();
41376d16c2f8Saliguori }
4138d0ecd2aaSbellard 
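/* Illustrative sketch (not from the original source): the zero-copy
 * discipline the two functions above imply.  my_dma_write() and
 * my_retry_cb are hypothetical; everything else is declared in this file.
 */
static void my_dma_write(void *opaque, void (*my_retry_cb)(void *),
                         target_phys_addr_t gpa, const uint8_t *src,
                         target_phys_addr_t size)
{
    target_phys_addr_t len = size;
    void *p = cpu_physical_memory_map(gpa, &len, 1 /* is_write */);

    if (!p) {
        /* bounce buffer busy: ask to be notified, then retry the map */
        cpu_register_map_client(opaque, my_retry_cb);
        return;
    }
    memcpy(p, src, len);                  /* len may be less than size */
    cpu_physical_memory_unmap(p, len, 1, len);
}
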
41398df1cd07Sbellard /* warning: addr must be aligned */
41401e78bcc1SAlexander Graf static inline uint32_t ldl_phys_internal(target_phys_addr_t addr,
41411e78bcc1SAlexander Graf                                          enum device_endian endian)
41428df1cd07Sbellard {
41438df1cd07Sbellard     int io_index;
41448df1cd07Sbellard     uint8_t *ptr;
41458df1cd07Sbellard     uint32_t val;
41468df1cd07Sbellard     unsigned long pd;
41478df1cd07Sbellard     PhysPageDesc *p;
41488df1cd07Sbellard 
41498df1cd07Sbellard     p = phys_page_find(addr >> TARGET_PAGE_BITS);
41508df1cd07Sbellard     if (!p) {
41518df1cd07Sbellard         pd = IO_MEM_UNASSIGNED;
41528df1cd07Sbellard     } else {
41538df1cd07Sbellard         pd = p->phys_offset;
41548df1cd07Sbellard     }
41558df1cd07Sbellard 
41562a4188a3Sbellard     if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
41572a4188a3Sbellard         !(pd & IO_MEM_ROMD)) {
41588df1cd07Sbellard         /* I/O case */
41598df1cd07Sbellard         io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
41608da3ff18Spbrook         if (p)
41618da3ff18Spbrook             addr = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
41628df1cd07Sbellard         val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr);
41631e78bcc1SAlexander Graf #if defined(TARGET_WORDS_BIGENDIAN)
41641e78bcc1SAlexander Graf         if (endian == DEVICE_LITTLE_ENDIAN) {
41651e78bcc1SAlexander Graf             val = bswap32(val);
41661e78bcc1SAlexander Graf         }
41671e78bcc1SAlexander Graf #else
41681e78bcc1SAlexander Graf         if (endian == DEVICE_BIG_ENDIAN) {
41691e78bcc1SAlexander Graf             val = bswap32(val);
41701e78bcc1SAlexander Graf         }
41711e78bcc1SAlexander Graf #endif
41728df1cd07Sbellard     } else {
41738df1cd07Sbellard         /* RAM case */
41745579c7f3Spbrook         ptr = qemu_get_ram_ptr(pd & TARGET_PAGE_MASK) +
41758df1cd07Sbellard             (addr & ~TARGET_PAGE_MASK);
41761e78bcc1SAlexander Graf         switch (endian) {
41771e78bcc1SAlexander Graf         case DEVICE_LITTLE_ENDIAN:
41781e78bcc1SAlexander Graf             val = ldl_le_p(ptr);
41791e78bcc1SAlexander Graf             break;
41801e78bcc1SAlexander Graf         case DEVICE_BIG_ENDIAN:
41811e78bcc1SAlexander Graf             val = ldl_be_p(ptr);
41821e78bcc1SAlexander Graf             break;
41831e78bcc1SAlexander Graf         default:
41848df1cd07Sbellard             val = ldl_p(ptr);
41851e78bcc1SAlexander Graf             break;
41861e78bcc1SAlexander Graf         }
41878df1cd07Sbellard     }
41888df1cd07Sbellard     return val;
41898df1cd07Sbellard }
41908df1cd07Sbellard 
41911e78bcc1SAlexander Graf uint32_t ldl_phys(target_phys_addr_t addr)
41921e78bcc1SAlexander Graf {
41931e78bcc1SAlexander Graf     return ldl_phys_internal(addr, DEVICE_NATIVE_ENDIAN);
41941e78bcc1SAlexander Graf }
41951e78bcc1SAlexander Graf 
41961e78bcc1SAlexander Graf uint32_t ldl_le_phys(target_phys_addr_t addr)
41971e78bcc1SAlexander Graf {
41981e78bcc1SAlexander Graf     return ldl_phys_internal(addr, DEVICE_LITTLE_ENDIAN);
41991e78bcc1SAlexander Graf }
42001e78bcc1SAlexander Graf 
42011e78bcc1SAlexander Graf uint32_t ldl_be_phys(target_phys_addr_t addr)
42021e78bcc1SAlexander Graf {
42031e78bcc1SAlexander Graf     return ldl_phys_internal(addr, DEVICE_BIG_ENDIAN);
42041e78bcc1SAlexander Graf }
42051e78bcc1SAlexander Graf 
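/* Illustrative sketch (not from the original source): the fixed-endian
 * accessors return the same value on every target, while ldl_phys() follows
 * the target's native order; for RAM-backed addresses the two fixed views
 * are byte swaps of each other.  ldl_endian_demo() is a hypothetical name.
 */
static uint32_t ldl_endian_demo(target_phys_addr_t addr)
{
    uint32_t le = ldl_le_phys(addr);      /* fixed little-endian view */
    uint32_t be = ldl_be_phys(addr);      /* fixed big-endian view */

    return le ^ bswap32(be);              /* 0 for RAM-backed addresses */
}
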
420684b7b8e7Sbellard /* warning: addr must be aligned */
42071e78bcc1SAlexander Graf static inline uint64_t ldq_phys_internal(target_phys_addr_t addr,
42081e78bcc1SAlexander Graf                                          enum device_endian endian)
420984b7b8e7Sbellard {
421084b7b8e7Sbellard     int io_index;
421184b7b8e7Sbellard     uint8_t *ptr;
421284b7b8e7Sbellard     uint64_t val;
421384b7b8e7Sbellard     unsigned long pd;
421484b7b8e7Sbellard     PhysPageDesc *p;
421584b7b8e7Sbellard 
421684b7b8e7Sbellard     p = phys_page_find(addr >> TARGET_PAGE_BITS);
421784b7b8e7Sbellard     if (!p) {
421884b7b8e7Sbellard         pd = IO_MEM_UNASSIGNED;
421984b7b8e7Sbellard     } else {
422084b7b8e7Sbellard         pd = p->phys_offset;
422184b7b8e7Sbellard     }
422284b7b8e7Sbellard 
42232a4188a3Sbellard     if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
42242a4188a3Sbellard         !(pd & IO_MEM_ROMD)) {
422584b7b8e7Sbellard         /* I/O case */
422684b7b8e7Sbellard         io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
42278da3ff18Spbrook         if (p)
42288da3ff18Spbrook             addr = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
42291e78bcc1SAlexander Graf 
42301e78bcc1SAlexander Graf         /* XXX This is broken when device endian != cpu endian.
42311e78bcc1SAlexander Graf                Fix this and add an "endian" variable check. */
423284b7b8e7Sbellard #ifdef TARGET_WORDS_BIGENDIAN
423384b7b8e7Sbellard         val = (uint64_t)io_mem_read[io_index][2](io_mem_opaque[io_index], addr) << 32;
423484b7b8e7Sbellard         val |= io_mem_read[io_index][2](io_mem_opaque[io_index], addr + 4);
423584b7b8e7Sbellard #else
423684b7b8e7Sbellard         val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr);
423784b7b8e7Sbellard         val |= (uint64_t)io_mem_read[io_index][2](io_mem_opaque[io_index], addr + 4) << 32;
423884b7b8e7Sbellard #endif
423984b7b8e7Sbellard     } else {
424084b7b8e7Sbellard         /* RAM case */
42415579c7f3Spbrook         ptr = qemu_get_ram_ptr(pd & TARGET_PAGE_MASK) +
424284b7b8e7Sbellard             (addr & ~TARGET_PAGE_MASK);
42431e78bcc1SAlexander Graf         switch (endian) {
42441e78bcc1SAlexander Graf         case DEVICE_LITTLE_ENDIAN:
42451e78bcc1SAlexander Graf             val = ldq_le_p(ptr);
42461e78bcc1SAlexander Graf             break;
42471e78bcc1SAlexander Graf         case DEVICE_BIG_ENDIAN:
42481e78bcc1SAlexander Graf             val = ldq_be_p(ptr);
42491e78bcc1SAlexander Graf             break;
42501e78bcc1SAlexander Graf         default:
425184b7b8e7Sbellard             val = ldq_p(ptr);
42521e78bcc1SAlexander Graf             break;
42531e78bcc1SAlexander Graf         }
425484b7b8e7Sbellard     }
425584b7b8e7Sbellard     return val;
425684b7b8e7Sbellard }
425784b7b8e7Sbellard 
42581e78bcc1SAlexander Graf uint64_t ldq_phys(target_phys_addr_t addr)
42591e78bcc1SAlexander Graf {
42601e78bcc1SAlexander Graf     return ldq_phys_internal(addr, DEVICE_NATIVE_ENDIAN);
42611e78bcc1SAlexander Graf }
42621e78bcc1SAlexander Graf 
42631e78bcc1SAlexander Graf uint64_t ldq_le_phys(target_phys_addr_t addr)
42641e78bcc1SAlexander Graf {
42651e78bcc1SAlexander Graf     return ldq_phys_internal(addr, DEVICE_LITTLE_ENDIAN);
42661e78bcc1SAlexander Graf }
42671e78bcc1SAlexander Graf 
42681e78bcc1SAlexander Graf uint64_t ldq_be_phys(target_phys_addr_t addr)
42691e78bcc1SAlexander Graf {
42701e78bcc1SAlexander Graf     return ldq_phys_internal(addr, DEVICE_BIG_ENDIAN);
42711e78bcc1SAlexander Graf }
42721e78bcc1SAlexander Graf 
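/* Sketch (RAM-backed addresses only; on MMIO this would issue two
   device reads with side effects): a 64-bit little-endian load can be
   composed from two 32-bit little-endian loads, mirroring the split
   that ldq_phys_internal() itself performs for the I/O case. */
static inline uint64_t example_ldq_from_halves(target_phys_addr_t addr)
{
    uint64_t lo = ldl_le_phys(addr);
    uint64_t hi = ldl_le_phys(addr + 4);
    return lo | (hi << 32);    /* equals ldq_le_phys(addr) for RAM */
}
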
4273aab33094Sbellard /* XXX: optimize */
4274c227f099SAnthony Liguori uint32_t ldub_phys(target_phys_addr_t addr)
4275aab33094Sbellard {
4276aab33094Sbellard     uint8_t val;
4277aab33094Sbellard     cpu_physical_memory_read(addr, &val, 1);
4278aab33094Sbellard     return val;
4279aab33094Sbellard }
4280aab33094Sbellard 
4281733f0b02SMichael S. Tsirkin /* warning: addr must be aligned */
42821e78bcc1SAlexander Graf static inline uint32_t lduw_phys_internal(target_phys_addr_t addr,
42831e78bcc1SAlexander Graf                                           enum device_endian endian)
4284aab33094Sbellard {
4285733f0b02SMichael S. Tsirkin     int io_index;
4286733f0b02SMichael S. Tsirkin     uint8_t *ptr;
4287733f0b02SMichael S. Tsirkin     uint32_t val;
4288733f0b02SMichael S. Tsirkin     unsigned long pd;
4289733f0b02SMichael S. Tsirkin     PhysPageDesc *p;
4290733f0b02SMichael S. Tsirkin 
4291733f0b02SMichael S. Tsirkin     p = phys_page_find(addr >> TARGET_PAGE_BITS);
4292733f0b02SMichael S. Tsirkin     if (!p) {
4293733f0b02SMichael S. Tsirkin         pd = IO_MEM_UNASSIGNED;
4294733f0b02SMichael S. Tsirkin     } else {
4295733f0b02SMichael S. Tsirkin         pd = p->phys_offset;
4296733f0b02SMichael S. Tsirkin     }
4297733f0b02SMichael S. Tsirkin 
4298733f0b02SMichael S. Tsirkin     if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
4299733f0b02SMichael S. Tsirkin         !(pd & IO_MEM_ROMD)) {
4300733f0b02SMichael S. Tsirkin         /* I/O case */
4301733f0b02SMichael S. Tsirkin         io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
4302733f0b02SMichael S. Tsirkin         if (p)
4303733f0b02SMichael S. Tsirkin             addr = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
4304733f0b02SMichael S. Tsirkin         val = io_mem_read[io_index][1](io_mem_opaque[io_index], addr);
43051e78bcc1SAlexander Graf #if defined(TARGET_WORDS_BIGENDIAN)
43061e78bcc1SAlexander Graf         if (endian == DEVICE_LITTLE_ENDIAN) {
43071e78bcc1SAlexander Graf             val = bswap16(val);
43081e78bcc1SAlexander Graf         }
43091e78bcc1SAlexander Graf #else
43101e78bcc1SAlexander Graf         if (endian == DEVICE_BIG_ENDIAN) {
43111e78bcc1SAlexander Graf             val = bswap16(val);
43121e78bcc1SAlexander Graf         }
43131e78bcc1SAlexander Graf #endif
4314733f0b02SMichael S. Tsirkin     } else {
4315733f0b02SMichael S. Tsirkin         /* RAM case */
4316733f0b02SMichael S. Tsirkin         ptr = qemu_get_ram_ptr(pd & TARGET_PAGE_MASK) +
4317733f0b02SMichael S. Tsirkin             (addr & ~TARGET_PAGE_MASK);
43181e78bcc1SAlexander Graf         switch (endian) {
43191e78bcc1SAlexander Graf         case DEVICE_LITTLE_ENDIAN:
43201e78bcc1SAlexander Graf             val = lduw_le_p(ptr);
43211e78bcc1SAlexander Graf             break;
43221e78bcc1SAlexander Graf         case DEVICE_BIG_ENDIAN:
43231e78bcc1SAlexander Graf             val = lduw_be_p(ptr);
43241e78bcc1SAlexander Graf             break;
43251e78bcc1SAlexander Graf         default:
4326733f0b02SMichael S. Tsirkin             val = lduw_p(ptr);
43271e78bcc1SAlexander Graf             break;
43281e78bcc1SAlexander Graf         }
4329733f0b02SMichael S. Tsirkin     }
4330733f0b02SMichael S. Tsirkin     return val;
4331aab33094Sbellard }
4332aab33094Sbellard 
43331e78bcc1SAlexander Graf uint32_t lduw_phys(target_phys_addr_t addr)
43341e78bcc1SAlexander Graf {
43351e78bcc1SAlexander Graf     return lduw_phys_internal(addr, DEVICE_NATIVE_ENDIAN);
43361e78bcc1SAlexander Graf }
43371e78bcc1SAlexander Graf 
43381e78bcc1SAlexander Graf uint32_t lduw_le_phys(target_phys_addr_t addr)
43391e78bcc1SAlexander Graf {
43401e78bcc1SAlexander Graf     return lduw_phys_internal(addr, DEVICE_LITTLE_ENDIAN);
43411e78bcc1SAlexander Graf }
43421e78bcc1SAlexander Graf 
43431e78bcc1SAlexander Graf uint32_t lduw_be_phys(target_phys_addr_t addr)
43441e78bcc1SAlexander Graf {
43451e78bcc1SAlexander Graf     return lduw_phys_internal(addr, DEVICE_BIG_ENDIAN);
43461e78bcc1SAlexander Graf }
43471e78bcc1SAlexander Graf 
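/* Usage sketch (hypothetical mapping): PCI configuration space is
   little-endian by specification, so a 16-bit field is read with the
   _le variant regardless of the target CPU's endianness. */
static inline uint32_t example_read_pci_vendor(target_phys_addr_t cfg_base)
{
    return lduw_le_phys(cfg_base + 0x00);    /* vendor ID field */
}
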
43488df1cd07Sbellard /* warning: addr must be aligned. The ram page is not marked as dirty
43498df1cd07Sbellard    and the code inside is not invalidated. This is useful when the dirty
43508df1cd07Sbellard    bits are used to track modified PTEs */
4351c227f099SAnthony Liguori void stl_phys_notdirty(target_phys_addr_t addr, uint32_t val)
43528df1cd07Sbellard {
43538df1cd07Sbellard     int io_index;
43548df1cd07Sbellard     uint8_t *ptr;
43558df1cd07Sbellard     unsigned long pd;
43568df1cd07Sbellard     PhysPageDesc *p;
43578df1cd07Sbellard 
43588df1cd07Sbellard     p = phys_page_find(addr >> TARGET_PAGE_BITS);
43598df1cd07Sbellard     if (!p) {
43608df1cd07Sbellard         pd = IO_MEM_UNASSIGNED;
43618df1cd07Sbellard     } else {
43628df1cd07Sbellard         pd = p->phys_offset;
43638df1cd07Sbellard     }
43648df1cd07Sbellard 
43653a7d929eSbellard     if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
43668df1cd07Sbellard         io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
43678da3ff18Spbrook         if (p)
43688da3ff18Spbrook             addr = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
43698df1cd07Sbellard         io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
43708df1cd07Sbellard     } else {
437174576198Saliguori         unsigned long addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
43725579c7f3Spbrook         ptr = qemu_get_ram_ptr(addr1);
43738df1cd07Sbellard         stl_p(ptr, val);
437474576198Saliguori 
437574576198Saliguori         if (unlikely(in_migration)) {
437674576198Saliguori             if (!cpu_physical_memory_is_dirty(addr1)) {
437774576198Saliguori                 /* invalidate code */
437874576198Saliguori                 tb_invalidate_phys_page_range(addr1, addr1 + 4, 0);
437974576198Saliguori                 /* set dirty bit */
4380f7c11b53SYoshiaki Tamura                 cpu_physical_memory_set_dirty_flags(
4381f7c11b53SYoshiaki Tamura                     addr1, (0xff & ~CODE_DIRTY_FLAG));
438274576198Saliguori             }
438374576198Saliguori         }
43848df1cd07Sbellard     }
43858df1cd07Sbellard }
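
/* Usage sketch (hypothetical PTE layout): target MMU helpers can use
   stl_phys_notdirty() to set accessed/dirty bits in a guest PTE; the
   store skips code invalidation, which is safe because a PTE update
   never rewrites translated code. */
static inline void example_set_pte_flag(target_phys_addr_t pte_addr,
                                        uint32_t flag)
{
    stl_phys_notdirty(pte_addr, ldl_phys(pte_addr) | flag);
}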
43868df1cd07Sbellard 
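/* warning: addr must be aligned; like stl_phys_notdirty(), the RAM
   page is not marked dirty and translated code is not invalidated */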
4387c227f099SAnthony Liguori void stq_phys_notdirty(target_phys_addr_t addr, uint64_t val)
4388bc98a7efSj_mayer {
4389bc98a7efSj_mayer     int io_index;
4390bc98a7efSj_mayer     uint8_t *ptr;
4391bc98a7efSj_mayer     unsigned long pd;
4392bc98a7efSj_mayer     PhysPageDesc *p;
4393bc98a7efSj_mayer 
4394bc98a7efSj_mayer     p = phys_page_find(addr >> TARGET_PAGE_BITS);
4395bc98a7efSj_mayer     if (!p) {
4396bc98a7efSj_mayer         pd = IO_MEM_UNASSIGNED;
4397bc98a7efSj_mayer     } else {
4398bc98a7efSj_mayer         pd = p->phys_offset;
4399bc98a7efSj_mayer     }
4400bc98a7efSj_mayer 
4401bc98a7efSj_mayer     if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
4402bc98a7efSj_mayer         io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
44038da3ff18Spbrook         if (p)
44048da3ff18Spbrook             addr = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
4405bc98a7efSj_mayer #ifdef TARGET_WORDS_BIGENDIAN
4406bc98a7efSj_mayer         io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val >> 32);
4407bc98a7efSj_mayer         io_mem_write[io_index][2](io_mem_opaque[io_index], addr + 4, val);
4408bc98a7efSj_mayer #else
4409bc98a7efSj_mayer         io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
4410bc98a7efSj_mayer         io_mem_write[io_index][2](io_mem_opaque[io_index], addr + 4, val >> 32);
4411bc98a7efSj_mayer #endif
4412bc98a7efSj_mayer     } else {
44135579c7f3Spbrook         ptr = qemu_get_ram_ptr(pd & TARGET_PAGE_MASK) +
4414bc98a7efSj_mayer             (addr & ~TARGET_PAGE_MASK);
4415bc98a7efSj_mayer         stq_p(ptr, val);
4416bc98a7efSj_mayer     }
4417bc98a7efSj_mayer }
4418bc98a7efSj_mayer 
44198df1cd07Sbellard /* warning: addr must be aligned */
44201e78bcc1SAlexander Graf static inline void stl_phys_internal(target_phys_addr_t addr, uint32_t val,
44211e78bcc1SAlexander Graf                                      enum device_endian endian)
44228df1cd07Sbellard {
44238df1cd07Sbellard     int io_index;
44248df1cd07Sbellard     uint8_t *ptr;
44258df1cd07Sbellard     unsigned long pd;
44268df1cd07Sbellard     PhysPageDesc *p;
44278df1cd07Sbellard 
44288df1cd07Sbellard     p = phys_page_find(addr >> TARGET_PAGE_BITS);
44298df1cd07Sbellard     if (!p) {
44308df1cd07Sbellard         pd = IO_MEM_UNASSIGNED;
44318df1cd07Sbellard     } else {
44328df1cd07Sbellard         pd = p->phys_offset;
44338df1cd07Sbellard     }
44348df1cd07Sbellard 
44353a7d929eSbellard     if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
44368df1cd07Sbellard         io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
44378da3ff18Spbrook         if (p)
44388da3ff18Spbrook             addr = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
44391e78bcc1SAlexander Graf #if defined(TARGET_WORDS_BIGENDIAN)
44401e78bcc1SAlexander Graf         if (endian == DEVICE_LITTLE_ENDIAN) {
44411e78bcc1SAlexander Graf             val = bswap32(val);
44421e78bcc1SAlexander Graf         }
44431e78bcc1SAlexander Graf #else
44441e78bcc1SAlexander Graf         if (endian == DEVICE_BIG_ENDIAN) {
44451e78bcc1SAlexander Graf             val = bswap32(val);
44461e78bcc1SAlexander Graf         }
44471e78bcc1SAlexander Graf #endif
44488df1cd07Sbellard         io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
44498df1cd07Sbellard     } else {
44508df1cd07Sbellard         unsigned long addr1;
44518df1cd07Sbellard         addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
44528df1cd07Sbellard         /* RAM case */
44535579c7f3Spbrook         ptr = qemu_get_ram_ptr(addr1);
44541e78bcc1SAlexander Graf         switch (endian) {
44551e78bcc1SAlexander Graf         case DEVICE_LITTLE_ENDIAN:
44561e78bcc1SAlexander Graf             stl_le_p(ptr, val);
44571e78bcc1SAlexander Graf             break;
44581e78bcc1SAlexander Graf         case DEVICE_BIG_ENDIAN:
44591e78bcc1SAlexander Graf             stl_be_p(ptr, val);
44601e78bcc1SAlexander Graf             break;
44611e78bcc1SAlexander Graf         default:
44628df1cd07Sbellard             stl_p(ptr, val);
44631e78bcc1SAlexander Graf             break;
44641e78bcc1SAlexander Graf         }
44653a7d929eSbellard         if (!cpu_physical_memory_is_dirty(addr1)) {
44668df1cd07Sbellard             /* invalidate code */
44678df1cd07Sbellard             tb_invalidate_phys_page_range(addr1, addr1 + 4, 0);
44688df1cd07Sbellard             /* set dirty bit */
4469f7c11b53SYoshiaki Tamura             cpu_physical_memory_set_dirty_flags(addr1,
4470f7c11b53SYoshiaki Tamura                 (0xff & ~CODE_DIRTY_FLAG));
44718df1cd07Sbellard         }
44728df1cd07Sbellard     }
44733a7d929eSbellard }
44748df1cd07Sbellard 
44751e78bcc1SAlexander Graf void stl_phys(target_phys_addr_t addr, uint32_t val)
44761e78bcc1SAlexander Graf {
44771e78bcc1SAlexander Graf     stl_phys_internal(addr, val, DEVICE_NATIVE_ENDIAN);
44781e78bcc1SAlexander Graf }
44791e78bcc1SAlexander Graf 
44801e78bcc1SAlexander Graf void stl_le_phys(target_phys_addr_t addr, uint32_t val)
44811e78bcc1SAlexander Graf {
44821e78bcc1SAlexander Graf     stl_phys_internal(addr, val, DEVICE_LITTLE_ENDIAN);
44831e78bcc1SAlexander Graf }
44841e78bcc1SAlexander Graf 
44851e78bcc1SAlexander Graf void stl_be_phys(target_phys_addr_t addr, uint32_t val)
44861e78bcc1SAlexander Graf {
44871e78bcc1SAlexander Graf     stl_phys_internal(addr, val, DEVICE_BIG_ENDIAN);
44881e78bcc1SAlexander Graf }
44891e78bcc1SAlexander Graf 
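/* Usage sketch (hypothetical address): unlike the _notdirty variants,
   stl_le_phys() and friends invalidate any translated code on the
   page and set the remaining dirty bits after the store. */
static inline void example_store_le_word(target_phys_addr_t addr,
                                         uint32_t val)
{
    stl_le_phys(addr, val);
}
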
4490aab33094Sbellard /* XXX: optimize */
4491c227f099SAnthony Liguori void stb_phys(target_phys_addr_t addr, uint32_t val)
4492aab33094Sbellard {
4493aab33094Sbellard     uint8_t v = val;
4494aab33094Sbellard     cpu_physical_memory_write(addr, &v, 1);
4495aab33094Sbellard }
4496aab33094Sbellard 
4497733f0b02SMichael S. Tsirkin /* warning: addr must be aligned */
44981e78bcc1SAlexander Graf static inline void stw_phys_internal(target_phys_addr_t addr, uint32_t val,
44991e78bcc1SAlexander Graf                                      enum device_endian endian)
4500aab33094Sbellard {
4501733f0b02SMichael S. Tsirkin     int io_index;
4502733f0b02SMichael S. Tsirkin     uint8_t *ptr;
4503733f0b02SMichael S. Tsirkin     unsigned long pd;
4504733f0b02SMichael S. Tsirkin     PhysPageDesc *p;
4505733f0b02SMichael S. Tsirkin 
4506733f0b02SMichael S. Tsirkin     p = phys_page_find(addr >> TARGET_PAGE_BITS);
4507733f0b02SMichael S. Tsirkin     if (!p) {
4508733f0b02SMichael S. Tsirkin         pd = IO_MEM_UNASSIGNED;
4509733f0b02SMichael S. Tsirkin     } else {
4510733f0b02SMichael S. Tsirkin         pd = p->phys_offset;
4511733f0b02SMichael S. Tsirkin     }
4512733f0b02SMichael S. Tsirkin 
4513733f0b02SMichael S. Tsirkin     if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
4514733f0b02SMichael S. Tsirkin         io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
4515733f0b02SMichael S. Tsirkin         if (p)
4516733f0b02SMichael S. Tsirkin             addr = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
45171e78bcc1SAlexander Graf #if defined(TARGET_WORDS_BIGENDIAN)
45181e78bcc1SAlexander Graf         if (endian == DEVICE_LITTLE_ENDIAN) {
45191e78bcc1SAlexander Graf             val = bswap16(val);
45201e78bcc1SAlexander Graf         }
45211e78bcc1SAlexander Graf #else
45221e78bcc1SAlexander Graf         if (endian == DEVICE_BIG_ENDIAN) {
45231e78bcc1SAlexander Graf             val = bswap16(val);
45241e78bcc1SAlexander Graf         }
45251e78bcc1SAlexander Graf #endif
4526733f0b02SMichael S. Tsirkin         io_mem_write[io_index][1](io_mem_opaque[io_index], addr, val);
4527733f0b02SMichael S. Tsirkin     } else {
4528733f0b02SMichael S. Tsirkin         unsigned long addr1;
4529733f0b02SMichael S. Tsirkin         addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
4530733f0b02SMichael S. Tsirkin         /* RAM case */
4531733f0b02SMichael S. Tsirkin         ptr = qemu_get_ram_ptr(addr1);
45321e78bcc1SAlexander Graf         switch (endian) {
45331e78bcc1SAlexander Graf         case DEVICE_LITTLE_ENDIAN:
45341e78bcc1SAlexander Graf             stw_le_p(ptr, val);
45351e78bcc1SAlexander Graf             break;
45361e78bcc1SAlexander Graf         case DEVICE_BIG_ENDIAN:
45371e78bcc1SAlexander Graf             stw_be_p(ptr, val);
45381e78bcc1SAlexander Graf             break;
45391e78bcc1SAlexander Graf         default:
4540733f0b02SMichael S. Tsirkin             stw_p(ptr, val);
45411e78bcc1SAlexander Graf             break;
45421e78bcc1SAlexander Graf         }
4543733f0b02SMichael S. Tsirkin         if (!cpu_physical_memory_is_dirty(addr1)) {
4544733f0b02SMichael S. Tsirkin             /* invalidate code */
4545733f0b02SMichael S. Tsirkin             tb_invalidate_phys_page_range(addr1, addr1 + 2, 0);
4546733f0b02SMichael S. Tsirkin             /* set dirty bit */
4547733f0b02SMichael S. Tsirkin             cpu_physical_memory_set_dirty_flags(addr1,
4548733f0b02SMichael S. Tsirkin                 (0xff & ~CODE_DIRTY_FLAG));
4549733f0b02SMichael S. Tsirkin         }
4550733f0b02SMichael S. Tsirkin     }
4551aab33094Sbellard }
4552aab33094Sbellard 
45531e78bcc1SAlexander Graf void stw_phys(target_phys_addr_t addr, uint32_t val)
45541e78bcc1SAlexander Graf {
45551e78bcc1SAlexander Graf     stw_phys_internal(addr, val, DEVICE_NATIVE_ENDIAN);
45561e78bcc1SAlexander Graf }
45571e78bcc1SAlexander Graf 
45581e78bcc1SAlexander Graf void stw_le_phys(target_phys_addr_t addr, uint32_t val)
45591e78bcc1SAlexander Graf {
45601e78bcc1SAlexander Graf     stw_phys_internal(addr, val, DEVICE_LITTLE_ENDIAN);
45611e78bcc1SAlexander Graf }
45621e78bcc1SAlexander Graf 
45631e78bcc1SAlexander Graf void stw_be_phys(target_phys_addr_t addr, uint32_t val)
45641e78bcc1SAlexander Graf {
45651e78bcc1SAlexander Graf     stw_phys_internal(addr, val, DEVICE_BIG_ENDIAN);
45661e78bcc1SAlexander Graf }
45671e78bcc1SAlexander Graf 
4568aab33094Sbellard /* XXX: optimize */
4569c227f099SAnthony Liguori void stq_phys(target_phys_addr_t addr, uint64_t val)
4570aab33094Sbellard {
4571aab33094Sbellard     val = tswap64(val);
457271d2b725SStefan Weil     cpu_physical_memory_write(addr, &val, 8);
4573aab33094Sbellard }
4574aab33094Sbellard 
45751e78bcc1SAlexander Graf void stq_le_phys(target_phys_addr_t addr, uint64_t val)
45761e78bcc1SAlexander Graf {
45771e78bcc1SAlexander Graf     val = cpu_to_le64(val);
45781e78bcc1SAlexander Graf     cpu_physical_memory_write(addr, &val, 8);
45791e78bcc1SAlexander Graf }
45801e78bcc1SAlexander Graf 
45811e78bcc1SAlexander Graf void stq_be_phys(target_phys_addr_t addr, uint64_t val)
45821e78bcc1SAlexander Graf {
45831e78bcc1SAlexander Graf     val = cpu_to_be64(val);
45841e78bcc1SAlexander Graf     cpu_physical_memory_write(addr, &val, 8);
45851e78bcc1SAlexander Graf }
45861e78bcc1SAlexander Graf 
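/* Worked equivalence: stq_le_phys() stores the 64-bit value in an
   explicit little-endian representation, byte for byte, through the
   generic memory path. */
static inline void example_stq_le_equiv(target_phys_addr_t addr,
                                        uint64_t val)
{
    uint64_t le = cpu_to_le64(val);
    cpu_physical_memory_write(addr, &le, 8);  /* same as stq_le_phys() */
}
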
45875e2972fdSaliguori /* virtual memory access for debug (includes writing to ROM) */
4588b448f2f3Sbellard int cpu_memory_rw_debug(CPUState *env, target_ulong addr,
4589b448f2f3Sbellard                         uint8_t *buf, int len, int is_write)
459013eb76e0Sbellard {
459113eb76e0Sbellard     int l;
4592c227f099SAnthony Liguori     target_phys_addr_t phys_addr;
45939b3c35e0Sj_mayer     target_ulong page;
459413eb76e0Sbellard 
459513eb76e0Sbellard     while (len > 0) {
459613eb76e0Sbellard         page = addr & TARGET_PAGE_MASK;
459713eb76e0Sbellard         phys_addr = cpu_get_phys_page_debug(env, page);
459813eb76e0Sbellard         /* if no physical page mapped, return an error */
459913eb76e0Sbellard         if (phys_addr == -1)
460013eb76e0Sbellard             return -1;
460113eb76e0Sbellard         l = (page + TARGET_PAGE_SIZE) - addr;
460213eb76e0Sbellard         if (l > len)
460313eb76e0Sbellard             l = len;
46045e2972fdSaliguori         phys_addr += (addr & ~TARGET_PAGE_MASK);
46055e2972fdSaliguori         if (is_write)
46065e2972fdSaliguori             cpu_physical_memory_write_rom(phys_addr, buf, l);
46075e2972fdSaliguori         else
46085e2972fdSaliguori             cpu_physical_memory_rw(phys_addr, buf, l, is_write);
460913eb76e0Sbellard         len -= l;
461013eb76e0Sbellard         buf += l;
461113eb76e0Sbellard         addr += l;
461213eb76e0Sbellard     }
461313eb76e0Sbellard     return 0;
461413eb76e0Sbellard }
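
/* Usage sketch (how a debugger stub would call this): read guest
   virtual memory on behalf of the debugger; is_write == 0 selects
   cpu_physical_memory_rw() for the underlying access. */
static inline int example_debug_read(CPUState *env, target_ulong vaddr,
                                     uint8_t *buf, int len)
{
    return cpu_memory_rw_debug(env, vaddr, buf, len, 0);
}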
4615a68fe89cSPaul Brook #endif
461613eb76e0Sbellard 
46172e70f6efSpbrook /* In deterministic execution mode, instructions performing device I/O
46182e70f6efSpbrook    must be the last instruction in the TB */
46192e70f6efSpbrook void cpu_io_recompile(CPUState *env, void *retaddr)
46202e70f6efSpbrook {
46212e70f6efSpbrook     TranslationBlock *tb;
46222e70f6efSpbrook     uint32_t n, cflags;
46232e70f6efSpbrook     target_ulong pc, cs_base;
46242e70f6efSpbrook     uint64_t flags;
46252e70f6efSpbrook 
46262e70f6efSpbrook     tb = tb_find_pc((unsigned long)retaddr);
46272e70f6efSpbrook     if (!tb) {
46282e70f6efSpbrook         cpu_abort(env, "cpu_io_recompile: could not find TB for pc=%p",
46292e70f6efSpbrook                   retaddr);
46302e70f6efSpbrook     }
46312e70f6efSpbrook     n = env->icount_decr.u16.low + tb->icount;
4632618ba8e6SStefan Weil     cpu_restore_state(tb, env, (unsigned long)retaddr);
46332e70f6efSpbrook     /* Calculate how many instructions had been executed before the fault
4634bf20dc07Sths        occurred.  */
46352e70f6efSpbrook     n = n - env->icount_decr.u16.low;
46362e70f6efSpbrook     /* Generate a new TB ending on the I/O insn.  */
46372e70f6efSpbrook     n++;
46382e70f6efSpbrook     /* On MIPS and SH, delay slot instructions can only be restarted if
46392e70f6efSpbrook        they were already the first instruction in the TB.  If this is not
4640bf20dc07Sths        the first instruction in a TB then re-execute the preceding
46412e70f6efSpbrook        branch.  */
46422e70f6efSpbrook #if defined(TARGET_MIPS)
46432e70f6efSpbrook     if ((env->hflags & MIPS_HFLAG_BMASK) != 0 && n > 1) {
46442e70f6efSpbrook         env->active_tc.PC -= 4;
46452e70f6efSpbrook         env->icount_decr.u16.low++;
46462e70f6efSpbrook         env->hflags &= ~MIPS_HFLAG_BMASK;
46472e70f6efSpbrook     }
46482e70f6efSpbrook #elif defined(TARGET_SH4)
46492e70f6efSpbrook     if ((env->flags & ((DELAY_SLOT | DELAY_SLOT_CONDITIONAL))) != 0
46502e70f6efSpbrook             && n > 1) {
46512e70f6efSpbrook         env->pc -= 2;
46522e70f6efSpbrook         env->icount_decr.u16.low++;
46532e70f6efSpbrook         env->flags &= ~(DELAY_SLOT | DELAY_SLOT_CONDITIONAL);
46542e70f6efSpbrook     }
46552e70f6efSpbrook #endif
46562e70f6efSpbrook     /* This should never happen.  */
46572e70f6efSpbrook     if (n > CF_COUNT_MASK)
46582e70f6efSpbrook         cpu_abort(env, "TB too big during recompile");
46592e70f6efSpbrook 
46602e70f6efSpbrook     cflags = n | CF_LAST_IO;
46612e70f6efSpbrook     pc = tb->pc;
46622e70f6efSpbrook     cs_base = tb->cs_base;
46632e70f6efSpbrook     flags = tb->flags;
46642e70f6efSpbrook     tb_phys_invalidate(tb, -1);
46652e70f6efSpbrook     /* FIXME: In theory this could raise an exception.  In practice
46662e70f6efSpbrook        we have already translated the block once so it's probably ok.  */
46672e70f6efSpbrook     tb_gen_code(env, pc, cs_base, flags, cflags);
4668bf20dc07Sths     /* TODO: If env->pc != tb->pc (i.e. the faulting instruction was not
46692e70f6efSpbrook        the first in the TB) then we end up generating a whole new TB and
46702e70f6efSpbrook        repeating the fault, which is horribly inefficient.
46712e70f6efSpbrook        Better would be to execute just this insn uncached, or generate a
46722e70f6efSpbrook        second new TB.  */
46732e70f6efSpbrook     cpu_resume_from_signal(env, NULL);
46742e70f6efSpbrook }
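
/* Worked example of the icount arithmetic above (hypothetical numbers,
   assuming cpu_restore_state() rewinds icount_decr.u16.low to the
   faulting instruction): for a 10-insn TB entered with low == 90 after
   the whole-TB decrement, n = 90 + 10 = 100; if 3 insns completed
   before the I/O access, the restored low is 97, so n = 100 - 97 = 3,
   and n++ makes the I/O insn the 4th and final insn of the TB that
   tb_gen_code() regenerates with CF_LAST_IO. */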
46752e70f6efSpbrook 
4676b3755a91SPaul Brook #if !defined(CONFIG_USER_ONLY)
4677b3755a91SPaul Brook 
4678055403b2SStefan Weil void dump_exec_info(FILE *f, fprintf_function cpu_fprintf)
4679e3db7226Sbellard {
4680e3db7226Sbellard     int i, target_code_size, max_target_code_size;
4681e3db7226Sbellard     int direct_jmp_count, direct_jmp2_count, cross_page;
4682e3db7226Sbellard     TranslationBlock *tb;
4683e3db7226Sbellard 
4684e3db7226Sbellard     target_code_size = 0;
4685e3db7226Sbellard     max_target_code_size = 0;
4686e3db7226Sbellard     cross_page = 0;
4687e3db7226Sbellard     direct_jmp_count = 0;
4688e3db7226Sbellard     direct_jmp2_count = 0;
4689e3db7226Sbellard     for(i = 0; i < nb_tbs; i++) {
4690e3db7226Sbellard         tb = &tbs[i];
4691e3db7226Sbellard         target_code_size += tb->size;
4692e3db7226Sbellard         if (tb->size > max_target_code_size)
4693e3db7226Sbellard             max_target_code_size = tb->size;
4694e3db7226Sbellard         if (tb->page_addr[1] != -1)
4695e3db7226Sbellard             cross_page++;
4696e3db7226Sbellard         if (tb->tb_next_offset[0] != 0xffff) {
4697e3db7226Sbellard             direct_jmp_count++;
4698e3db7226Sbellard             if (tb->tb_next_offset[1] != 0xffff) {
4699e3db7226Sbellard                 direct_jmp2_count++;
4700e3db7226Sbellard             }
4701e3db7226Sbellard         }
4702e3db7226Sbellard     }
4703e3db7226Sbellard     /* XXX: avoid using doubles? */
470457fec1feSbellard     cpu_fprintf(f, "Translation buffer state:\n");
4705055403b2SStefan Weil     cpu_fprintf(f, "gen code size       %td/%ld\n",
470626a5f13bSbellard                 code_gen_ptr - code_gen_buffer, code_gen_buffer_max_size);
470726a5f13bSbellard     cpu_fprintf(f, "TB count            %d/%d\n",
470826a5f13bSbellard                 nb_tbs, code_gen_max_blocks);
4709e3db7226Sbellard     cpu_fprintf(f, "TB avg target size  %d max=%d bytes\n",
4710e3db7226Sbellard                 nb_tbs ? target_code_size / nb_tbs : 0,
4711e3db7226Sbellard                 max_target_code_size);
4712055403b2SStefan Weil     cpu_fprintf(f, "TB avg host size    %td bytes (expansion ratio: %0.1f)\n",
4713e3db7226Sbellard                 nb_tbs ? (code_gen_ptr - code_gen_buffer) / nb_tbs : 0,
4714e3db7226Sbellard                 target_code_size ? (double) (code_gen_ptr - code_gen_buffer) / target_code_size : 0);
4715e3db7226Sbellard     cpu_fprintf(f, "cross page TB count %d (%d%%)\n",
4716e3db7226Sbellard             cross_page,
4717e3db7226Sbellard             nb_tbs ? (cross_page * 100) / nb_tbs : 0);
4718e3db7226Sbellard     cpu_fprintf(f, "direct jump count   %d (%d%%) (2 jumps=%d %d%%)\n",
4719e3db7226Sbellard                 direct_jmp_count,
4720e3db7226Sbellard                 nb_tbs ? (direct_jmp_count * 100) / nb_tbs : 0,
4721e3db7226Sbellard                 direct_jmp2_count,
4722e3db7226Sbellard                 nb_tbs ? (direct_jmp2_count * 100) / nb_tbs : 0);
472357fec1feSbellard     cpu_fprintf(f, "\nStatistics:\n");
4724e3db7226Sbellard     cpu_fprintf(f, "TB flush count      %d\n", tb_flush_count);
4725e3db7226Sbellard     cpu_fprintf(f, "TB invalidate count %d\n", tb_phys_invalidate_count);
4726e3db7226Sbellard     cpu_fprintf(f, "TLB flush count     %d\n", tlb_flush_count);
4727b67d9a52Sbellard     tcg_dump_info(f, cpu_fprintf);
4728e3db7226Sbellard }
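
/* Usage sketch: any (stream, fprintf-like) pair works here; e.g. the
   statistics can be sent to stderr with the stdio fprintf. */
static inline void example_dump_jit_stats(void)
{
    dump_exec_info(stderr, fprintf);
}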
4729e3db7226Sbellard 
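/* Instantiate the code-access ("_cmmu") softmmu helpers for all four
   access sizes; SHIFT is log2 of the access size in bytes
   (0 = byte, 1 = 16-bit, 2 = 32-bit, 3 = 64-bit). */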
473061382a50Sbellard #define MMUSUFFIX _cmmu
473161382a50Sbellard #define GETPC() NULL
473261382a50Sbellard #define env cpu_single_env
4733b769d8feSbellard #define SOFTMMU_CODE_ACCESS
473461382a50Sbellard 
473561382a50Sbellard #define SHIFT 0
473661382a50Sbellard #include "softmmu_template.h"
473761382a50Sbellard 
473861382a50Sbellard #define SHIFT 1
473961382a50Sbellard #include "softmmu_template.h"
474061382a50Sbellard 
474161382a50Sbellard #define SHIFT 2
474261382a50Sbellard #include "softmmu_template.h"
474361382a50Sbellard 
474461382a50Sbellard #define SHIFT 3
474561382a50Sbellard #include "softmmu_template.h"
474661382a50Sbellard 
474761382a50Sbellard #undef env
474861382a50Sbellard 
474961382a50Sbellard #endif
4750