xref: /qemu/system/physmem.c (revision 239fda311a6f7784bc4f732795722c909b835651)
154936004Sbellard /*
2fd6ce8f6Sbellard  *  virtual page mapping and translated block handling
354936004Sbellard  *
454936004Sbellard  *  Copyright (c) 2003 Fabrice Bellard
554936004Sbellard  *
654936004Sbellard  * This library is free software; you can redistribute it and/or
754936004Sbellard  * modify it under the terms of the GNU Lesser General Public
854936004Sbellard  * License as published by the Free Software Foundation; either
954936004Sbellard  * version 2 of the License, or (at your option) any later version.
1054936004Sbellard  *
1154936004Sbellard  * This library is distributed in the hope that it will be useful,
1254936004Sbellard  * but WITHOUT ANY WARRANTY; without even the implied warranty of
1354936004Sbellard  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
1454936004Sbellard  * Lesser General Public License for more details.
1554936004Sbellard  *
1654936004Sbellard  * You should have received a copy of the GNU Lesser General Public
178167ee88SBlue Swirl  * License along with this library; if not, see <http://www.gnu.org/licenses/>.
1854936004Sbellard  */
1967b915a5Sbellard #include "config.h"
20d5a8f07cSbellard #ifdef _WIN32
21d5a8f07cSbellard #include <windows.h>
22d5a8f07cSbellard #else
23a98d49b1Sbellard #include <sys/types.h>
24d5a8f07cSbellard #include <sys/mman.h>
25d5a8f07cSbellard #endif
2654936004Sbellard #include <stdlib.h>
2754936004Sbellard #include <stdio.h>
2854936004Sbellard #include <stdarg.h>
2954936004Sbellard #include <string.h>
3054936004Sbellard #include <errno.h>
3154936004Sbellard #include <unistd.h>
3254936004Sbellard #include <inttypes.h>
3354936004Sbellard 
346180a181Sbellard #include "cpu.h"
356180a181Sbellard #include "exec-all.h"
36ca10f867Saurel32 #include "qemu-common.h"
37b67d9a52Sbellard #include "tcg.h"
38b3c7724cSpbrook #include "hw/hw.h"
3974576198Saliguori #include "osdep.h"
407ba1e619Saliguori #include "kvm.h"
4129e922b6SBlue Swirl #include "qemu-timer.h"
4253a5960aSpbrook #if defined(CONFIG_USER_ONLY)
4353a5960aSpbrook #include <qemu.h>
44fd052bf6SRiku Voipio #include <signal.h>
45f01576f1SJuergen Lock #if defined(__FreeBSD__) || defined(__FreeBSD_kernel__)
46f01576f1SJuergen Lock #include <sys/param.h>
47f01576f1SJuergen Lock #if __FreeBSD_version >= 700104
48f01576f1SJuergen Lock #define HAVE_KINFO_GETVMMAP
49f01576f1SJuergen Lock #define sigqueue sigqueue_freebsd  /* avoid redefinition */
50f01576f1SJuergen Lock #include <sys/time.h>
51f01576f1SJuergen Lock #include <sys/proc.h>
52f01576f1SJuergen Lock #include <machine/profile.h>
53f01576f1SJuergen Lock #define _KERNEL
54f01576f1SJuergen Lock #include <sys/user.h>
55f01576f1SJuergen Lock #undef _KERNEL
56f01576f1SJuergen Lock #undef sigqueue
57f01576f1SJuergen Lock #include <libutil.h>
58f01576f1SJuergen Lock #endif
59f01576f1SJuergen Lock #endif
6053a5960aSpbrook #endif
6154936004Sbellard 
62fd6ce8f6Sbellard //#define DEBUG_TB_INVALIDATE
6366e85a21Sbellard //#define DEBUG_FLUSH
649fa3e853Sbellard //#define DEBUG_TLB
6567d3b957Spbrook //#define DEBUG_UNASSIGNED
66fd6ce8f6Sbellard 
67fd6ce8f6Sbellard /* make various TB consistency checks */
68fd6ce8f6Sbellard //#define DEBUG_TB_CHECK
6998857888Sbellard //#define DEBUG_TLB_CHECK
70fd6ce8f6Sbellard 
711196be37Sths //#define DEBUG_IOPORT
72db7b5426Sblueswir1 //#define DEBUG_SUBPAGE
731196be37Sths 
7499773bd4Spbrook #if !defined(CONFIG_USER_ONLY)
7599773bd4Spbrook /* TB consistency checks only implemented for usermode emulation.  */
7699773bd4Spbrook #undef DEBUG_TB_CHECK
7799773bd4Spbrook #endif
7899773bd4Spbrook 
799fa3e853Sbellard #define SMC_BITMAP_USE_THRESHOLD 10
809fa3e853Sbellard 
81bdaf78e0Sblueswir1 static TranslationBlock *tbs;
8226a5f13bSbellard int code_gen_max_blocks;
839fa3e853Sbellard TranslationBlock *tb_phys_hash[CODE_GEN_PHYS_HASH_SIZE];
84bdaf78e0Sblueswir1 static int nb_tbs;
85eb51d102Sbellard /* any access to the tbs or the page table must use this lock */
86c227f099SAnthony Liguori spinlock_t tb_lock = SPIN_LOCK_UNLOCKED;
87fd6ce8f6Sbellard 
88141ac468Sblueswir1 #if defined(__arm__) || defined(__sparc_v9__)
89141ac468Sblueswir1 /* The prologue must be reachable with a direct jump. ARM and Sparc64
90141ac468Sblueswir1  have limited branch ranges (possibly also PPC) so place it in a
91d03d860bSblueswir1  section close to code segment. */
92d03d860bSblueswir1 #define code_gen_section                                \
93d03d860bSblueswir1     __attribute__((__section__(".gen_code")))           \
94d03d860bSblueswir1     __attribute__((aligned (32)))
95f8e2af11SStefan Weil #elif defined(_WIN32)
96f8e2af11SStefan Weil /* Maximum alignment for Win32 is 16. */
97f8e2af11SStefan Weil #define code_gen_section                                \
98f8e2af11SStefan Weil     __attribute__((aligned (16)))
99d03d860bSblueswir1 #else
100d03d860bSblueswir1 #define code_gen_section                                \
101d03d860bSblueswir1     __attribute__((aligned (32)))
102d03d860bSblueswir1 #endif
103d03d860bSblueswir1 
104d03d860bSblueswir1 uint8_t code_gen_prologue[1024] code_gen_section;
105bdaf78e0Sblueswir1 static uint8_t *code_gen_buffer;
106bdaf78e0Sblueswir1 static unsigned long code_gen_buffer_size;
10726a5f13bSbellard /* threshold to flush the translated code buffer */
108bdaf78e0Sblueswir1 static unsigned long code_gen_buffer_max_size;
109fd6ce8f6Sbellard uint8_t *code_gen_ptr;
110fd6ce8f6Sbellard 
111e2eef170Spbrook #if !defined(CONFIG_USER_ONLY)
1129fa3e853Sbellard int phys_ram_fd;
1131ccde1cbSbellard uint8_t *phys_ram_dirty;
11474576198Saliguori static int in_migration;
11594a6b54fSpbrook 
/* One contiguous chunk of guest RAM, kept in the singly linked
   ram_blocks list below.  */
11694a6b54fSpbrook typedef struct RAMBlock {
    /* host virtual address where this block's memory lives */
11794a6b54fSpbrook     uint8_t *host;
    /* start of the block within the flat ram_addr_t space */
118c227f099SAnthony Liguori     ram_addr_t offset;
    /* size of the block in bytes */
119c227f099SAnthony Liguori     ram_addr_t length;
12094a6b54fSpbrook     struct RAMBlock *next;
12194a6b54fSpbrook } RAMBlock;
12294a6b54fSpbrook 
12394a6b54fSpbrook static RAMBlock *ram_blocks;
12494a6b54fSpbrook /* TODO: When we implement (and use) ram deallocation (e.g. for hotplug)
125ccbb4d44SStuart Brady    then we can no longer assume contiguous ram offsets, and external uses
12694a6b54fSpbrook    of this variable will break.  */
127c227f099SAnthony Liguori ram_addr_t last_ram_offset;
128e2eef170Spbrook #endif
1299fa3e853Sbellard 
1306a00d601Sbellard CPUState *first_cpu;
1316a00d601Sbellard /* current CPU in the current thread. It is only valid inside
1326a00d601Sbellard    cpu_exec() */
1336a00d601Sbellard CPUState *cpu_single_env;
1342e70f6efSpbrook /* 0 = Do not count executed instructions.
135bf20dc07Sths    1 = Precise instruction counting.
1362e70f6efSpbrook    2 = Adaptive rate instruction counting.  */
1372e70f6efSpbrook int use_icount = 0;
1382e70f6efSpbrook /* Current instruction counter.  While executing translated code this may
1392e70f6efSpbrook    include some instructions that have not yet been executed.  */
1402e70f6efSpbrook int64_t qemu_icount;
1416a00d601Sbellard 
/* Per guest-page bookkeeping used by the translator: which TBs cover
   the page, plus state for the self-modifying-code bitmap
   optimization (see SMC_BITMAP_USE_THRESHOLD above).  */
14354936004Sbellard typedef struct PageDesc {
14492e873b9Sbellard     /* list of TBs intersecting this ram page */
144fd6ce8f6Sbellard     TranslationBlock *first_tb;
1459fa3e853Sbellard     /* in order to optimize self modifying code, we count the number
1469fa3e853Sbellard        of lookups we do to a given page to use a bitmap */
1479fa3e853Sbellard     unsigned int code_write_count;
1489fa3e853Sbellard     uint8_t *code_bitmap;
1499fa3e853Sbellard #if defined(CONFIG_USER_ONLY)
    /* user mode only: PAGE_* protection/validity flags for the page */
1509fa3e853Sbellard     unsigned long flags;
1519fa3e853Sbellard #endif
15254936004Sbellard } PageDesc;
15354936004Sbellard 
15441c1b1c9SPaul Brook /* In system mode we want L1_MAP to be based on ram offsets,
1555cd2c5b6SRichard Henderson    while in user mode we want it to be based on virtual addresses.  */
1565cd2c5b6SRichard Henderson #if !defined(CONFIG_USER_ONLY)
15741c1b1c9SPaul Brook #if HOST_LONG_BITS < TARGET_PHYS_ADDR_SPACE_BITS
15841c1b1c9SPaul Brook # define L1_MAP_ADDR_SPACE_BITS  HOST_LONG_BITS
15941c1b1c9SPaul Brook #else
1605cd2c5b6SRichard Henderson # define L1_MAP_ADDR_SPACE_BITS  TARGET_PHYS_ADDR_SPACE_BITS
16141c1b1c9SPaul Brook #endif
162bedb69eaSj_mayer #else
1635cd2c5b6SRichard Henderson # define L1_MAP_ADDR_SPACE_BITS  TARGET_VIRT_ADDR_SPACE_BITS
164bedb69eaSj_mayer #endif
16554936004Sbellard 
1665cd2c5b6SRichard Henderson /* Size of the L2 (and L3, etc) page tables.  */
1675cd2c5b6SRichard Henderson #define L2_BITS 10
16854936004Sbellard #define L2_SIZE (1 << L2_BITS)
16954936004Sbellard 
1705cd2c5b6SRichard Henderson /* The bits remaining after N lower levels of page tables.  */
1715cd2c5b6SRichard Henderson #define P_L1_BITS_REM \
1725cd2c5b6SRichard Henderson     ((TARGET_PHYS_ADDR_SPACE_BITS - TARGET_PAGE_BITS) % L2_BITS)
1735cd2c5b6SRichard Henderson #define V_L1_BITS_REM \
1745cd2c5b6SRichard Henderson     ((L1_MAP_ADDR_SPACE_BITS - TARGET_PAGE_BITS) % L2_BITS)
1755cd2c5b6SRichard Henderson 
1765cd2c5b6SRichard Henderson /* Size of the L1 page table.  Avoid silly small sizes.  */
1775cd2c5b6SRichard Henderson #if P_L1_BITS_REM < 4
1785cd2c5b6SRichard Henderson #define P_L1_BITS  (P_L1_BITS_REM + L2_BITS)
1795cd2c5b6SRichard Henderson #else
1805cd2c5b6SRichard Henderson #define P_L1_BITS  P_L1_BITS_REM
1815cd2c5b6SRichard Henderson #endif
1825cd2c5b6SRichard Henderson 
1835cd2c5b6SRichard Henderson #if V_L1_BITS_REM < 4
1845cd2c5b6SRichard Henderson #define V_L1_BITS  (V_L1_BITS_REM + L2_BITS)
1855cd2c5b6SRichard Henderson #else
1865cd2c5b6SRichard Henderson #define V_L1_BITS  V_L1_BITS_REM
1875cd2c5b6SRichard Henderson #endif
1885cd2c5b6SRichard Henderson 
1895cd2c5b6SRichard Henderson #define P_L1_SIZE  ((target_phys_addr_t)1 << P_L1_BITS)
1905cd2c5b6SRichard Henderson #define V_L1_SIZE  ((target_ulong)1 << V_L1_BITS)
1915cd2c5b6SRichard Henderson 
1925cd2c5b6SRichard Henderson #define P_L1_SHIFT (TARGET_PHYS_ADDR_SPACE_BITS - TARGET_PAGE_BITS - P_L1_BITS)
1935cd2c5b6SRichard Henderson #define V_L1_SHIFT (L1_MAP_ADDR_SPACE_BITS - TARGET_PAGE_BITS - V_L1_BITS)
1945cd2c5b6SRichard Henderson 
19583fb7adfSbellard unsigned long qemu_real_host_page_size;
19683fb7adfSbellard unsigned long qemu_host_page_bits;
19783fb7adfSbellard unsigned long qemu_host_page_size;
19883fb7adfSbellard unsigned long qemu_host_page_mask;
19954936004Sbellard 
2005cd2c5b6SRichard Henderson /* This is a multi-level map on the virtual address space.
2015cd2c5b6SRichard Henderson    The bottom level has pointers to PageDesc.  */
2025cd2c5b6SRichard Henderson static void *l1_map[V_L1_SIZE];
20354936004Sbellard 
204e2eef170Spbrook #if !defined(CONFIG_USER_ONLY)
/* Per physical-page descriptor stored in the leaves of l1_phys_map.  */
20541c1b1c9SPaul Brook typedef struct PhysPageDesc {
20641c1b1c9SPaul Brook     /* offset in host memory of the page + io_index in the low bits */
20741c1b1c9SPaul Brook     ram_addr_t phys_offset;
    /* offset of this page within its memory region; defaults to the
       page's own address (see phys_page_find_alloc below) */
20841c1b1c9SPaul Brook     ram_addr_t region_offset;
20941c1b1c9SPaul Brook } PhysPageDesc;
21041c1b1c9SPaul Brook 
2115cd2c5b6SRichard Henderson /* This is a multi-level map on the physical address space.
2125cd2c5b6SRichard Henderson    The bottom level has pointers to PhysPageDesc.  */
2135cd2c5b6SRichard Henderson static void *l1_phys_map[P_L1_SIZE];
2146d9a1304SPaul Brook 
215e2eef170Spbrook static void io_mem_init(void);
216e2eef170Spbrook 
21733417e70Sbellard /* io memory support */
21833417e70Sbellard CPUWriteMemoryFunc *io_mem_write[IO_MEM_NB_ENTRIES][4];
21933417e70Sbellard CPUReadMemoryFunc *io_mem_read[IO_MEM_NB_ENTRIES][4];
220a4193c8aSbellard void *io_mem_opaque[IO_MEM_NB_ENTRIES];
221511d2b14Sblueswir1 static char io_mem_used[IO_MEM_NB_ENTRIES];
2226658ffb8Spbrook static int io_mem_watch;
2236658ffb8Spbrook #endif
22433417e70Sbellard 
22534865134Sbellard /* log support */
2261e8b27caSJuha Riihimäki #ifdef WIN32
2271e8b27caSJuha Riihimäki static const char *logfilename = "qemu.log";
2281e8b27caSJuha Riihimäki #else
229d9b630fdSblueswir1 static const char *logfilename = "/tmp/qemu.log";
2301e8b27caSJuha Riihimäki #endif
23134865134Sbellard FILE *logfile;
23234865134Sbellard int loglevel;
233e735b91cSpbrook static int log_append = 0;
23434865134Sbellard 
235e3db7226Sbellard /* statistics */
236b3755a91SPaul Brook #if !defined(CONFIG_USER_ONLY)
237e3db7226Sbellard static int tlb_flush_count;
238b3755a91SPaul Brook #endif
239e3db7226Sbellard static int tb_flush_count;
240e3db7226Sbellard static int tb_phys_invalidate_count;
241e3db7226Sbellard 
/* Mark the host memory range [addr, addr + size) as executable so the
   TCG-generated code placed there can be run.  Two variants: Win32 uses
   VirtualProtect, everything else uses mprotect with the range expanded
   to whole host pages.  */
2427cb69caeSbellard #ifdef _WIN32
2437cb69caeSbellard static void map_exec(void *addr, long size)
2447cb69caeSbellard {
2457cb69caeSbellard     DWORD old_protect;
    /* NOTE(review): the VirtualProtect return value is not checked;
       a failure here would be silent.  */
2467cb69caeSbellard     VirtualProtect(addr, size,
2477cb69caeSbellard                    PAGE_EXECUTE_READWRITE, &old_protect);
2487cb69caeSbellard 
2497cb69caeSbellard }
2507cb69caeSbellard #else
2517cb69caeSbellard static void map_exec(void *addr, long size)
2527cb69caeSbellard {
2534369415fSbellard     unsigned long start, end, page_size;
2547cb69caeSbellard 
2554369415fSbellard     page_size = getpagesize();
    /* round start down to a page boundary ... */
2567cb69caeSbellard     start = (unsigned long)addr;
2574369415fSbellard     start &= ~(page_size - 1);
2587cb69caeSbellard 
    /* ... and round end up, so the whole range is covered */
2597cb69caeSbellard     end = (unsigned long)addr + size;
2604369415fSbellard     end += page_size - 1;
2614369415fSbellard     end &= ~(page_size - 1);
2627cb69caeSbellard 
    /* NOTE(review): mprotect errors are also ignored here.  */
2637cb69caeSbellard     mprotect((void *)start, end - start,
2647cb69caeSbellard              PROT_READ | PROT_WRITE | PROT_EXEC);
2657cb69caeSbellard }
2667cb69caeSbellard #endif
2677cb69caeSbellard 
/* One-time initialization of host page geometry (qemu_real_host_page_size,
   qemu_host_page_size/bits/mask).  In BSD user-mode emulation it also walks
   the process's existing memory map and marks those ranges PAGE_RESERVED so
   the guest cannot be mapped on top of them.  */
268b346ff46Sbellard static void page_init(void)
26954936004Sbellard {
27083fb7adfSbellard     /* NOTE: we can always suppose that qemu_host_page_size >=
27154936004Sbellard        TARGET_PAGE_SIZE */
272c2b48b69Saliguori #ifdef _WIN32
273c2b48b69Saliguori     {
274c2b48b69Saliguori         SYSTEM_INFO system_info;
275c2b48b69Saliguori 
276c2b48b69Saliguori         GetSystemInfo(&system_info);
277c2b48b69Saliguori         qemu_real_host_page_size = system_info.dwPageSize;
278c2b48b69Saliguori     }
279c2b48b69Saliguori #else
280c2b48b69Saliguori     qemu_real_host_page_size = getpagesize();
281c2b48b69Saliguori #endif
    /* qemu_host_page_size may have been preset elsewhere; clamp it to
       at least the real host page and the target page size.  */
28283fb7adfSbellard     if (qemu_host_page_size == 0)
28383fb7adfSbellard         qemu_host_page_size = qemu_real_host_page_size;
28483fb7adfSbellard     if (qemu_host_page_size < TARGET_PAGE_SIZE)
28583fb7adfSbellard         qemu_host_page_size = TARGET_PAGE_SIZE;
    /* derive log2 and mask from the (power-of-two) host page size */
28683fb7adfSbellard     qemu_host_page_bits = 0;
28783fb7adfSbellard     while ((1 << qemu_host_page_bits) < qemu_host_page_size)
28883fb7adfSbellard         qemu_host_page_bits++;
28983fb7adfSbellard     qemu_host_page_mask = ~(qemu_host_page_size - 1);
29050a9569bSbalrog 
2912e9a5713SPaul Brook #if defined(CONFIG_BSD) && defined(CONFIG_USER_ONLY)
29250a9569bSbalrog     {
293f01576f1SJuergen Lock #ifdef HAVE_KINFO_GETVMMAP
        /* FreeBSD >= 700104: query the VM map via libutil instead of
           parsing a procfs file.  */
294f01576f1SJuergen Lock         struct kinfo_vmentry *freep;
295f01576f1SJuergen Lock         int i, cnt;
296f01576f1SJuergen Lock 
297f01576f1SJuergen Lock         freep = kinfo_getvmmap(getpid(), &cnt);
298f01576f1SJuergen Lock         if (freep) {
299f01576f1SJuergen Lock             mmap_lock();
300f01576f1SJuergen Lock             for (i = 0; i < cnt; i++) {
301f01576f1SJuergen Lock                 unsigned long startaddr, endaddr;
302f01576f1SJuergen Lock 
303f01576f1SJuergen Lock                 startaddr = freep[i].kve_start;
304f01576f1SJuergen Lock                 endaddr = freep[i].kve_end;
305f01576f1SJuergen Lock                 if (h2g_valid(startaddr)) {
306f01576f1SJuergen Lock                     startaddr = h2g(startaddr) & TARGET_PAGE_MASK;
307f01576f1SJuergen Lock 
308f01576f1SJuergen Lock                     if (h2g_valid(endaddr)) {
309f01576f1SJuergen Lock                         endaddr = h2g(endaddr);
310fd436907SAurelien Jarno                         page_set_flags(startaddr, endaddr, PAGE_RESERVED);
311f01576f1SJuergen Lock                     } else {
                        /* end lies outside the guest address space: only
                           reserve to the top when the guest ABI can cover
                           the whole map address space */
312f01576f1SJuergen Lock #if TARGET_ABI_BITS <= L1_MAP_ADDR_SPACE_BITS
313f01576f1SJuergen Lock                         endaddr = ~0ul;
314fd436907SAurelien Jarno                         page_set_flags(startaddr, endaddr, PAGE_RESERVED);
315f01576f1SJuergen Lock #endif
316f01576f1SJuergen Lock                     }
317f01576f1SJuergen Lock                 }
318f01576f1SJuergen Lock             }
319f01576f1SJuergen Lock             free(freep);
320f01576f1SJuergen Lock             mmap_unlock();
321f01576f1SJuergen Lock         }
322f01576f1SJuergen Lock #else
        /* Fallback: parse the Linux-compat procfs maps file.  If the
           file is absent (fopen fails) no ranges get reserved.  */
32350a9569bSbalrog         FILE *f;
32450a9569bSbalrog 
3250776590dSpbrook         last_brk = (unsigned long)sbrk(0);
3265cd2c5b6SRichard Henderson 
327fd436907SAurelien Jarno         f = fopen("/compat/linux/proc/self/maps", "r");
32850a9569bSbalrog         if (f) {
3295cd2c5b6SRichard Henderson             mmap_lock();
3305cd2c5b6SRichard Henderson 
33150a9569bSbalrog             do {
3325cd2c5b6SRichard Henderson                 unsigned long startaddr, endaddr;
3335cd2c5b6SRichard Henderson                 int n;
3345cd2c5b6SRichard Henderson 
                /* each maps line starts "start-end ..."; ignore the rest */
3355cd2c5b6SRichard Henderson                 n = fscanf (f, "%lx-%lx %*[^\n]\n", &startaddr, &endaddr);
3365cd2c5b6SRichard Henderson 
3375cd2c5b6SRichard Henderson                 if (n == 2 && h2g_valid(startaddr)) {
3385cd2c5b6SRichard Henderson                     startaddr = h2g(startaddr) & TARGET_PAGE_MASK;
3395cd2c5b6SRichard Henderson 
3405cd2c5b6SRichard Henderson                     if (h2g_valid(endaddr)) {
3415cd2c5b6SRichard Henderson                         endaddr = h2g(endaddr);
3425cd2c5b6SRichard Henderson                     } else {
3435cd2c5b6SRichard Henderson                         endaddr = ~0ul;
3445cd2c5b6SRichard Henderson                     }
3455cd2c5b6SRichard Henderson                     page_set_flags(startaddr, endaddr, PAGE_RESERVED);
34650a9569bSbalrog                 }
34750a9569bSbalrog             } while (!feof(f));
3485cd2c5b6SRichard Henderson 
34950a9569bSbalrog             fclose(f);
350c8a706feSpbrook             mmap_unlock();
35150a9569bSbalrog         }
352f01576f1SJuergen Lock #endif
3535cd2c5b6SRichard Henderson     }
35450a9569bSbalrog #endif
35554936004Sbellard }
35654936004Sbellard 
/* Look up the PageDesc for guest page 'index' (a page number, not a byte
   address) in the multi-level l1_map radix tree.  If 'alloc' is nonzero,
   missing intermediate tables and the leaf PageDesc array are allocated
   (zero-filled); otherwise returns NULL when any level is absent.
   NOTE(review): allocation failure (mmap/qemu_mallocz) is not checked
   here — presumably callers rely on the allocator aborting.  */
35741c1b1c9SPaul Brook static PageDesc *page_find_alloc(tb_page_addr_t index, int alloc)
35854936004Sbellard {
35941c1b1c9SPaul Brook     PageDesc *pd;
36041c1b1c9SPaul Brook     void **lp;
36141c1b1c9SPaul Brook     int i;
36241c1b1c9SPaul Brook 
36317e2377aSpbrook #if defined(CONFIG_USER_ONLY)
3642e9a5713SPaul Brook     /* We can't use qemu_malloc because it may recurse into a locked mutex. */
3655cd2c5b6SRichard Henderson # define ALLOC(P, SIZE)                                 \
3665cd2c5b6SRichard Henderson     do {                                                \
3675cd2c5b6SRichard Henderson         P = mmap(NULL, SIZE, PROT_READ | PROT_WRITE,    \
3685cd2c5b6SRichard Henderson                  MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);   \
3695cd2c5b6SRichard Henderson     } while (0)
3705cd2c5b6SRichard Henderson #else
3715cd2c5b6SRichard Henderson # define ALLOC(P, SIZE) \
3725cd2c5b6SRichard Henderson     do { P = qemu_mallocz(SIZE); } while (0)
3735cd2c5b6SRichard Henderson #endif
3745cd2c5b6SRichard Henderson 
3755cd2c5b6SRichard Henderson     /* Level 1.  Always allocated.  */
3765cd2c5b6SRichard Henderson     lp = l1_map + ((index >> V_L1_SHIFT) & (V_L1_SIZE - 1));
3775cd2c5b6SRichard Henderson 
3785cd2c5b6SRichard Henderson     /* Level 2..N-1.  */
3795cd2c5b6SRichard Henderson     for (i = V_L1_SHIFT / L2_BITS - 1; i > 0; i--) {
3805cd2c5b6SRichard Henderson         void **p = *lp;
3815cd2c5b6SRichard Henderson 
3825cd2c5b6SRichard Henderson         if (p == NULL) {
3835cd2c5b6SRichard Henderson             if (!alloc) {
3845cd2c5b6SRichard Henderson                 return NULL;
3855cd2c5b6SRichard Henderson             }
3865cd2c5b6SRichard Henderson             ALLOC(p, sizeof(void *) * L2_SIZE);
38754936004Sbellard             *lp = p;
3885cd2c5b6SRichard Henderson         }
3895cd2c5b6SRichard Henderson 
        /* descend: select this level's slot from the index bits */
3905cd2c5b6SRichard Henderson         lp = p + ((index >> (i * L2_BITS)) & (L2_SIZE - 1));
3915cd2c5b6SRichard Henderson     }
3925cd2c5b6SRichard Henderson 
    /* Bottom level: an array of L2_SIZE PageDesc entries.  */
3935cd2c5b6SRichard Henderson     pd = *lp;
3945cd2c5b6SRichard Henderson     if (pd == NULL) {
3955cd2c5b6SRichard Henderson         if (!alloc) {
3965cd2c5b6SRichard Henderson             return NULL;
3975cd2c5b6SRichard Henderson         }
3985cd2c5b6SRichard Henderson         ALLOC(pd, sizeof(PageDesc) * L2_SIZE);
3995cd2c5b6SRichard Henderson         *lp = pd;
4005cd2c5b6SRichard Henderson     }
4015cd2c5b6SRichard Henderson 
4025cd2c5b6SRichard Henderson #undef ALLOC
4035cd2c5b6SRichard Henderson 
4045cd2c5b6SRichard Henderson     return pd + (index & (L2_SIZE - 1));
40554936004Sbellard }
40654936004Sbellard 
/* Read-only lookup: returns the PageDesc for page 'index', or NULL if
   none has been allocated yet.  */
40741c1b1c9SPaul Brook static inline PageDesc *page_find(tb_page_addr_t index)
40854936004Sbellard {
4095cd2c5b6SRichard Henderson     return page_find_alloc(index, 0);
41054936004Sbellard }
41154936004Sbellard 
4126d9a1304SPaul Brook #if !defined(CONFIG_USER_ONLY)
/* Physical-address analogue of page_find_alloc: walk l1_phys_map to the
   PhysPageDesc for physical page 'index', allocating missing levels when
   'alloc' is set.  Fresh leaf entries are initialized to IO_MEM_UNASSIGNED
   with region_offset equal to the page's own physical address.
   NOTE(review): qemu_malloc[z] results are not NULL-checked here.  */
413c227f099SAnthony Liguori static PhysPageDesc *phys_page_find_alloc(target_phys_addr_t index, int alloc)
41492e873b9Sbellard {
415e3f4e2a4Spbrook     PhysPageDesc *pd;
4165cd2c5b6SRichard Henderson     void **lp;
417e3f4e2a4Spbrook     int i;
4185cd2c5b6SRichard Henderson 
4195cd2c5b6SRichard Henderson     /* Level 1.  Always allocated.  */
4205cd2c5b6SRichard Henderson     lp = l1_phys_map + ((index >> P_L1_SHIFT) & (P_L1_SIZE - 1));
4215cd2c5b6SRichard Henderson 
4225cd2c5b6SRichard Henderson     /* Level 2..N-1.  */
4235cd2c5b6SRichard Henderson     for (i = P_L1_SHIFT / L2_BITS - 1; i > 0; i--) {
4245cd2c5b6SRichard Henderson         void **p = *lp;
4255cd2c5b6SRichard Henderson         if (p == NULL) {
4265cd2c5b6SRichard Henderson             if (!alloc) {
427108c49b8Sbellard                 return NULL;
4285cd2c5b6SRichard Henderson             }
4295cd2c5b6SRichard Henderson             *lp = p = qemu_mallocz(sizeof(void *) * L2_SIZE);
4305cd2c5b6SRichard Henderson         }
4315cd2c5b6SRichard Henderson         lp = p + ((index >> (i * L2_BITS)) & (L2_SIZE - 1));
4325cd2c5b6SRichard Henderson     }
4335cd2c5b6SRichard Henderson 
4345cd2c5b6SRichard Henderson     pd = *lp;
4355cd2c5b6SRichard Henderson     if (pd == NULL) {
4365cd2c5b6SRichard Henderson         int i;
4375cd2c5b6SRichard Henderson 
4385cd2c5b6SRichard Henderson         if (!alloc) {
4395cd2c5b6SRichard Henderson             return NULL;
4405cd2c5b6SRichard Henderson         }
4415cd2c5b6SRichard Henderson 
        /* plain qemu_malloc here: every entry is filled in below */
4425cd2c5b6SRichard Henderson         *lp = pd = qemu_malloc(sizeof(PhysPageDesc) * L2_SIZE);
4435cd2c5b6SRichard Henderson 
44467c4d23cSpbrook         for (i = 0; i < L2_SIZE; i++) {
445e3f4e2a4Spbrook             pd[i].phys_offset = IO_MEM_UNASSIGNED;
44667c4d23cSpbrook             pd[i].region_offset = (index + i) << TARGET_PAGE_BITS;
44767c4d23cSpbrook         }
44892e873b9Sbellard     }
4495cd2c5b6SRichard Henderson 
4505cd2c5b6SRichard Henderson     return pd + (index & (L2_SIZE - 1));
45192e873b9Sbellard }
45292e873b9Sbellard 
/* Read-only lookup: returns the PhysPageDesc for physical page 'index',
   or NULL if that part of the map has not been populated.  */
453c227f099SAnthony Liguori static inline PhysPageDesc *phys_page_find(target_phys_addr_t index)
45492e873b9Sbellard {
455108c49b8Sbellard     return phys_page_find_alloc(index, 0);
45692e873b9Sbellard }
45792e873b9Sbellard 
458c227f099SAnthony Liguori static void tlb_protect_code(ram_addr_t ram_addr);
459c227f099SAnthony Liguori static void tlb_unprotect_code_phys(CPUState *env, ram_addr_t ram_addr,
4603a7d929eSbellard                                     target_ulong vaddr);
461c8a706feSpbrook #define mmap_lock() do { } while(0)
462c8a706feSpbrook #define mmap_unlock() do { } while(0)
4639fa3e853Sbellard #endif
464fd6ce8f6Sbellard 
4654369415fSbellard #define DEFAULT_CODE_GEN_BUFFER_SIZE (32 * 1024 * 1024)
4664369415fSbellard 
4674369415fSbellard #if defined(CONFIG_USER_ONLY)
468ccbb4d44SStuart Brady /* Currently it is not recommended to allocate big chunks of data in
4694369415fSbellard    user mode. It will change when a dedicated libc will be used */
4704369415fSbellard #define USE_STATIC_CODE_GEN_BUFFER
4714369415fSbellard #endif
4724369415fSbellard 
4734369415fSbellard #ifdef USE_STATIC_CODE_GEN_BUFFER
474ebf50fb3SAurelien Jarno static uint8_t static_code_gen_buffer[DEFAULT_CODE_GEN_BUFFER_SIZE]
475ebf50fb3SAurelien Jarno                __attribute__((aligned (CODE_GEN_ALIGN)));
4764369415fSbellard #endif
4774369415fSbellard 
/* Allocate (or adopt) the buffer that holds TCG-generated host code and
   size the TB array accordingly.  'tb_size' is a byte size; 0 selects a
   default.  The buffer must be executable, and on some hosts must sit at
   a low/fixed address so generated direct branches can reach it.
   Exits the process on allocation failure.  */
4788fcd3692Sblueswir1 static void code_gen_alloc(unsigned long tb_size)
47926a5f13bSbellard {
4804369415fSbellard #ifdef USE_STATIC_CODE_GEN_BUFFER
    /* user mode: use the static BSS buffer and just make it executable */
4814369415fSbellard     code_gen_buffer = static_code_gen_buffer;
4824369415fSbellard     code_gen_buffer_size = DEFAULT_CODE_GEN_BUFFER_SIZE;
4834369415fSbellard     map_exec(code_gen_buffer, code_gen_buffer_size);
4844369415fSbellard #else
48526a5f13bSbellard     code_gen_buffer_size = tb_size;
48626a5f13bSbellard     if (code_gen_buffer_size == 0) {
4874369415fSbellard #if defined(CONFIG_USER_ONLY)
4884369415fSbellard         /* in user mode, phys_ram_size is not meaningful */
4894369415fSbellard         code_gen_buffer_size = DEFAULT_CODE_GEN_BUFFER_SIZE;
4904369415fSbellard #else
491ccbb4d44SStuart Brady         /* XXX: needs adjustments */
49294a6b54fSpbrook         code_gen_buffer_size = (unsigned long)(ram_size / 4);
4934369415fSbellard #endif
49426a5f13bSbellard     }
49526a5f13bSbellard     if (code_gen_buffer_size < MIN_CODE_GEN_BUFFER_SIZE)
49626a5f13bSbellard         code_gen_buffer_size = MIN_CODE_GEN_BUFFER_SIZE;
49726a5f13bSbellard     /* The code gen buffer location may have constraints depending on
49826a5f13bSbellard        the host cpu and OS */
49926a5f13bSbellard #if defined(__linux__)
50026a5f13bSbellard     {
50126a5f13bSbellard         int flags;
502141ac468Sblueswir1         void *start = NULL;
503141ac468Sblueswir1 
50426a5f13bSbellard         flags = MAP_PRIVATE | MAP_ANONYMOUS;
50526a5f13bSbellard #if defined(__x86_64__)
        /* keep the buffer in the low 2GB so rip-relative/direct
           addressing of it works */
50626a5f13bSbellard         flags |= MAP_32BIT;
50726a5f13bSbellard         /* Cannot map more than that */
50826a5f13bSbellard         if (code_gen_buffer_size > (800 * 1024 * 1024))
50926a5f13bSbellard             code_gen_buffer_size = (800 * 1024 * 1024);
510141ac468Sblueswir1 #elif defined(__sparc_v9__)
511141ac468Sblueswir1         // Map the buffer below 2G, so we can use direct calls and branches
        /* NOTE(review): MAP_FIXED at a hard-coded address will silently
           replace any mapping already there.  */
512141ac468Sblueswir1         flags |= MAP_FIXED;
513141ac468Sblueswir1         start = (void *) 0x60000000UL;
514141ac468Sblueswir1         if (code_gen_buffer_size > (512 * 1024 * 1024))
515141ac468Sblueswir1             code_gen_buffer_size = (512 * 1024 * 1024);
5161cb0661eSbalrog #elif defined(__arm__)
51763d41246Sbalrog         /* Map the buffer below 32M, so we can use direct calls and branches */
5181cb0661eSbalrog         flags |= MAP_FIXED;
5191cb0661eSbalrog         start = (void *) 0x01000000UL;
5201cb0661eSbalrog         if (code_gen_buffer_size > 16 * 1024 * 1024)
5211cb0661eSbalrog             code_gen_buffer_size = 16 * 1024 * 1024;
52226a5f13bSbellard #endif
523141ac468Sblueswir1         code_gen_buffer = mmap(start, code_gen_buffer_size,
52426a5f13bSbellard                                PROT_WRITE | PROT_READ | PROT_EXEC,
52526a5f13bSbellard                                flags, -1, 0);
52626a5f13bSbellard         if (code_gen_buffer == MAP_FAILED) {
52726a5f13bSbellard             fprintf(stderr, "Could not allocate dynamic translator buffer\n");
52826a5f13bSbellard             exit(1);
52926a5f13bSbellard         }
53026a5f13bSbellard     }
531a167ba50SAurelien Jarno #elif defined(__FreeBSD__) || defined(__FreeBSD_kernel__) || defined(__DragonFly__)
53206e67a82Saliguori     {
53306e67a82Saliguori         int flags;
53406e67a82Saliguori         void *addr = NULL;
53506e67a82Saliguori         flags = MAP_PRIVATE | MAP_ANONYMOUS;
53606e67a82Saliguori #if defined(__x86_64__)
53706e67a82Saliguori         /* FreeBSD doesn't have MAP_32BIT, use MAP_FIXED and assume
53806e67a82Saliguori          * 0x40000000 is free */
53906e67a82Saliguori         flags |= MAP_FIXED;
54006e67a82Saliguori         addr = (void *)0x40000000;
54106e67a82Saliguori         /* Cannot map more than that */
54206e67a82Saliguori         if (code_gen_buffer_size > (800 * 1024 * 1024))
54306e67a82Saliguori             code_gen_buffer_size = (800 * 1024 * 1024);
54406e67a82Saliguori #endif
54506e67a82Saliguori         code_gen_buffer = mmap(addr, code_gen_buffer_size,
54606e67a82Saliguori                                PROT_WRITE | PROT_READ | PROT_EXEC,
54706e67a82Saliguori                                flags, -1, 0);
54806e67a82Saliguori         if (code_gen_buffer == MAP_FAILED) {
54906e67a82Saliguori             fprintf(stderr, "Could not allocate dynamic translator buffer\n");
55006e67a82Saliguori             exit(1);
55106e67a82Saliguori         }
55206e67a82Saliguori     }
55326a5f13bSbellard #else
    /* generic fallback: heap allocation + mprotect/VirtualProtect */
55426a5f13bSbellard     code_gen_buffer = qemu_malloc(code_gen_buffer_size);
55526a5f13bSbellard     map_exec(code_gen_buffer, code_gen_buffer_size);
55626a5f13bSbellard #endif
5574369415fSbellard #endif /* !USE_STATIC_CODE_GEN_BUFFER */
55826a5f13bSbellard     map_exec(code_gen_prologue, sizeof(code_gen_prologue));
    /* reserve headroom so one maximal TB can always be emitted past
       the flush threshold */
55926a5f13bSbellard     code_gen_buffer_max_size = code_gen_buffer_size -
560239fda31SAurelien Jarno         (TCG_MAX_OP_SIZE * OPC_MAX_SIZE);
56126a5f13bSbellard     code_gen_max_blocks = code_gen_buffer_size / CODE_GEN_AVG_BLOCK_SIZE;
56226a5f13bSbellard     tbs = qemu_malloc(code_gen_max_blocks * sizeof(TranslationBlock));
56326a5f13bSbellard }
56426a5f13bSbellard 
56526a5f13bSbellard /* Must be called before using the QEMU cpus. 'tb_size' is the size
56626a5f13bSbellard    (in bytes) allocated to the translation buffer. Zero means default
56726a5f13bSbellard    size. */
56826a5f13bSbellard /* Must be called before using the QEMU cpus. 'tb_size' is the size
56826a5f13bSbellard    (in bytes) allocated to the translation buffer. Zero means default
56826a5f13bSbellard    size. */
56826a5f13bSbellard void cpu_exec_init_all(unsigned long tb_size)
56926a5f13bSbellard {
    /* order matters: TCG core, then the code buffer it emits into,
       then host page geometry */
57026a5f13bSbellard     cpu_gen_init();
57126a5f13bSbellard     code_gen_alloc(tb_size);
57226a5f13bSbellard     code_gen_ptr = code_gen_buffer;
5734369415fSbellard     page_init();
574e2eef170Spbrook #if !defined(CONFIG_USER_ONLY)
57526a5f13bSbellard     io_mem_init();
576e2eef170Spbrook #endif
5779002ec79SRichard Henderson #if !defined(CONFIG_USER_ONLY) || !defined(CONFIG_USE_GUEST_BASE)
5789002ec79SRichard Henderson     /* There's no guest base to take into account, so go ahead and
5799002ec79SRichard Henderson        initialize the prologue now.  */
5809002ec79SRichard Henderson     tcg_prologue_init(&tcg_ctx);
5819002ec79SRichard Henderson #endif
58226a5f13bSbellard }
58326a5f13bSbellard 
5849656f324Spbrook #if defined(CPU_SAVE_VERSION) && !defined(CONFIG_USER_ONLY)
5859656f324Spbrook 
/* VMState post_load hook for the common CPU state: scrub a legacy
   interrupt bit and invalidate the TLB, since the loaded mappings are
   no longer valid.  Always returns 0 (success).  */
586e59fb374SJuan Quintela static int cpu_common_post_load(void *opaque, int version_id)
587e7f4eff7SJuan Quintela {
588e7f4eff7SJuan Quintela     CPUState *env = opaque;
589e7f4eff7SJuan Quintela 
5903098dba0Saurel32     /* 0x01 was CPU_INTERRUPT_EXIT. This line can be removed when the
5913098dba0Saurel32        version_id is increased. */
5923098dba0Saurel32     env->interrupt_request &= ~0x01;
5939656f324Spbrook     tlb_flush(env, 1);
5949656f324Spbrook 
5959656f324Spbrook     return 0;
5969656f324Spbrook }
597e7f4eff7SJuan Quintela 
/* Migration description for the architecture-independent part of
   CPUState: just 'halted' and 'interrupt_request', fixed up by
   cpu_common_post_load after loading.  */
598e7f4eff7SJuan Quintela static const VMStateDescription vmstate_cpu_common = {
599e7f4eff7SJuan Quintela     .name = "cpu_common",
600e7f4eff7SJuan Quintela     .version_id = 1,
601e7f4eff7SJuan Quintela     .minimum_version_id = 1,
602e7f4eff7SJuan Quintela     .minimum_version_id_old = 1,
603e7f4eff7SJuan Quintela     .post_load = cpu_common_post_load,
604e7f4eff7SJuan Quintela     .fields      = (VMStateField []) {
605e7f4eff7SJuan Quintela         VMSTATE_UINT32(halted, CPUState),
606e7f4eff7SJuan Quintela         VMSTATE_UINT32(interrupt_request, CPUState),
607e7f4eff7SJuan Quintela         VMSTATE_END_OF_LIST()
608e7f4eff7SJuan Quintela     }
609e7f4eff7SJuan Quintela };
6109656f324Spbrook #endif
6119656f324Spbrook 
/* Linear search of the global CPU list for the CPUState whose
   cpu_index equals 'cpu'.  Returns NULL if no such CPU exists.  */
612950f1472SGlauber Costa CPUState *qemu_get_cpu(int cpu)
613950f1472SGlauber Costa {
614950f1472SGlauber Costa     CPUState *env = first_cpu;
615950f1472SGlauber Costa 
616950f1472SGlauber Costa     while (env) {
617950f1472SGlauber Costa         if (env->cpu_index == cpu)
618950f1472SGlauber Costa             break;
619950f1472SGlauber Costa         env = env->next_cpu;
620950f1472SGlauber Costa     }
621950f1472SGlauber Costa 
622950f1472SGlauber Costa     return env;
623950f1472SGlauber Costa }
624950f1472SGlauber Costa 
6256a00d601Sbellard void cpu_exec_init(CPUState *env)
626fd6ce8f6Sbellard {
6276a00d601Sbellard     CPUState **penv;
6286a00d601Sbellard     int cpu_index;
6296a00d601Sbellard 
630c2764719Spbrook #if defined(CONFIG_USER_ONLY)
631c2764719Spbrook     cpu_list_lock();
632c2764719Spbrook #endif
6336a00d601Sbellard     env->next_cpu = NULL;
6346a00d601Sbellard     penv = &first_cpu;
6356a00d601Sbellard     cpu_index = 0;
6366a00d601Sbellard     while (*penv != NULL) {
6371e9fa730SNathan Froyd         penv = &(*penv)->next_cpu;
6386a00d601Sbellard         cpu_index++;
6396a00d601Sbellard     }
6406a00d601Sbellard     env->cpu_index = cpu_index;
641268a362cSaliguori     env->numa_node = 0;
64272cf2d4fSBlue Swirl     QTAILQ_INIT(&env->breakpoints);
64372cf2d4fSBlue Swirl     QTAILQ_INIT(&env->watchpoints);
6446a00d601Sbellard     *penv = env;
645c2764719Spbrook #if defined(CONFIG_USER_ONLY)
646c2764719Spbrook     cpu_list_unlock();
647c2764719Spbrook #endif
648b3c7724cSpbrook #if defined(CPU_SAVE_VERSION) && !defined(CONFIG_USER_ONLY)
649e7f4eff7SJuan Quintela     vmstate_register(cpu_index, &vmstate_cpu_common, env);
650b3c7724cSpbrook     register_savevm("cpu", cpu_index, CPU_SAVE_VERSION,
651b3c7724cSpbrook                     cpu_save, cpu_load, env);
652b3c7724cSpbrook #endif
653fd6ce8f6Sbellard }
654fd6ce8f6Sbellard 
6559fa3e853Sbellard static inline void invalidate_page_bitmap(PageDesc *p)
6569fa3e853Sbellard {
6579fa3e853Sbellard     if (p->code_bitmap) {
65859817ccbSbellard         qemu_free(p->code_bitmap);
6599fa3e853Sbellard         p->code_bitmap = NULL;
6609fa3e853Sbellard     }
6619fa3e853Sbellard     p->code_write_count = 0;
6629fa3e853Sbellard }
6639fa3e853Sbellard 
6645cd2c5b6SRichard Henderson /* Set to NULL all the 'first_tb' fields in all PageDescs. */
6655cd2c5b6SRichard Henderson 
6665cd2c5b6SRichard Henderson static void page_flush_tb_1 (int level, void **lp)
6675cd2c5b6SRichard Henderson {
6685cd2c5b6SRichard Henderson     int i;
6695cd2c5b6SRichard Henderson 
6705cd2c5b6SRichard Henderson     if (*lp == NULL) {
6715cd2c5b6SRichard Henderson         return;
6725cd2c5b6SRichard Henderson     }
6735cd2c5b6SRichard Henderson     if (level == 0) {
6745cd2c5b6SRichard Henderson         PageDesc *pd = *lp;
6757296abacSPaul Brook         for (i = 0; i < L2_SIZE; ++i) {
6765cd2c5b6SRichard Henderson             pd[i].first_tb = NULL;
6775cd2c5b6SRichard Henderson             invalidate_page_bitmap(pd + i);
6785cd2c5b6SRichard Henderson         }
6795cd2c5b6SRichard Henderson     } else {
6805cd2c5b6SRichard Henderson         void **pp = *lp;
6817296abacSPaul Brook         for (i = 0; i < L2_SIZE; ++i) {
6825cd2c5b6SRichard Henderson             page_flush_tb_1 (level - 1, pp + i);
6835cd2c5b6SRichard Henderson         }
6845cd2c5b6SRichard Henderson     }
6855cd2c5b6SRichard Henderson }
6865cd2c5b6SRichard Henderson 
687fd6ce8f6Sbellard static void page_flush_tb(void)
688fd6ce8f6Sbellard {
6895cd2c5b6SRichard Henderson     int i;
6905cd2c5b6SRichard Henderson     for (i = 0; i < V_L1_SIZE; i++) {
6915cd2c5b6SRichard Henderson         page_flush_tb_1(V_L1_SHIFT / L2_BITS - 1, l1_map + i);
692fd6ce8f6Sbellard     }
693fd6ce8f6Sbellard }
694fd6ce8f6Sbellard 
695fd6ce8f6Sbellard /* flush all the translation blocks */
696d4e8164fSbellard /* XXX: tb_flush is currently not thread safe */
6976a00d601Sbellard void tb_flush(CPUState *env1)
698fd6ce8f6Sbellard {
6996a00d601Sbellard     CPUState *env;
7000124311eSbellard #if defined(DEBUG_FLUSH)
701ab3d1727Sblueswir1     printf("qemu: flush code_size=%ld nb_tbs=%d avg_tb_size=%ld\n",
702ab3d1727Sblueswir1            (unsigned long)(code_gen_ptr - code_gen_buffer),
703ab3d1727Sblueswir1            nb_tbs, nb_tbs > 0 ?
704ab3d1727Sblueswir1            ((unsigned long)(code_gen_ptr - code_gen_buffer)) / nb_tbs : 0);
705fd6ce8f6Sbellard #endif
70626a5f13bSbellard     if ((unsigned long)(code_gen_ptr - code_gen_buffer) > code_gen_buffer_size)
707a208e54aSpbrook         cpu_abort(env1, "Internal error: code buffer overflow\n");
708a208e54aSpbrook 
709fd6ce8f6Sbellard     nb_tbs = 0;
7106a00d601Sbellard 
7116a00d601Sbellard     for(env = first_cpu; env != NULL; env = env->next_cpu) {
7128a40a180Sbellard         memset (env->tb_jmp_cache, 0, TB_JMP_CACHE_SIZE * sizeof (void *));
7136a00d601Sbellard     }
7149fa3e853Sbellard 
7158a8a608fSbellard     memset (tb_phys_hash, 0, CODE_GEN_PHYS_HASH_SIZE * sizeof (void *));
716fd6ce8f6Sbellard     page_flush_tb();
7179fa3e853Sbellard 
718fd6ce8f6Sbellard     code_gen_ptr = code_gen_buffer;
719d4e8164fSbellard     /* XXX: flush processor icache at this point if cache flush is
720d4e8164fSbellard        expensive */
721e3db7226Sbellard     tb_flush_count++;
722fd6ce8f6Sbellard }
723fd6ce8f6Sbellard 
724fd6ce8f6Sbellard #ifdef DEBUG_TB_CHECK
725fd6ce8f6Sbellard 
726bc98a7efSj_mayer static void tb_invalidate_check(target_ulong address)
727fd6ce8f6Sbellard {
728fd6ce8f6Sbellard     TranslationBlock *tb;
729fd6ce8f6Sbellard     int i;
730fd6ce8f6Sbellard     address &= TARGET_PAGE_MASK;
73199773bd4Spbrook     for(i = 0;i < CODE_GEN_PHYS_HASH_SIZE; i++) {
73299773bd4Spbrook         for(tb = tb_phys_hash[i]; tb != NULL; tb = tb->phys_hash_next) {
733fd6ce8f6Sbellard             if (!(address + TARGET_PAGE_SIZE <= tb->pc ||
734fd6ce8f6Sbellard                   address >= tb->pc + tb->size)) {
7350bf9e31aSBlue Swirl                 printf("ERROR invalidate: address=" TARGET_FMT_lx
7360bf9e31aSBlue Swirl                        " PC=%08lx size=%04x\n",
73799773bd4Spbrook                        address, (long)tb->pc, tb->size);
738fd6ce8f6Sbellard             }
739fd6ce8f6Sbellard         }
740fd6ce8f6Sbellard     }
741fd6ce8f6Sbellard }
742fd6ce8f6Sbellard 
743fd6ce8f6Sbellard /* verify that all the pages have correct rights for code */
744fd6ce8f6Sbellard static void tb_page_check(void)
745fd6ce8f6Sbellard {
746fd6ce8f6Sbellard     TranslationBlock *tb;
747fd6ce8f6Sbellard     int i, flags1, flags2;
748fd6ce8f6Sbellard 
74999773bd4Spbrook     for(i = 0;i < CODE_GEN_PHYS_HASH_SIZE; i++) {
75099773bd4Spbrook         for(tb = tb_phys_hash[i]; tb != NULL; tb = tb->phys_hash_next) {
751fd6ce8f6Sbellard             flags1 = page_get_flags(tb->pc);
752fd6ce8f6Sbellard             flags2 = page_get_flags(tb->pc + tb->size - 1);
753fd6ce8f6Sbellard             if ((flags1 & PAGE_WRITE) || (flags2 & PAGE_WRITE)) {
754fd6ce8f6Sbellard                 printf("ERROR page flags: PC=%08lx size=%04x f1=%x f2=%x\n",
75599773bd4Spbrook                        (long)tb->pc, tb->size, flags1, flags2);
756fd6ce8f6Sbellard             }
757fd6ce8f6Sbellard         }
758fd6ce8f6Sbellard     }
759fd6ce8f6Sbellard }
760fd6ce8f6Sbellard 
761fd6ce8f6Sbellard #endif
762fd6ce8f6Sbellard 
763fd6ce8f6Sbellard /* invalidate one TB */
764fd6ce8f6Sbellard static inline void tb_remove(TranslationBlock **ptb, TranslationBlock *tb,
765fd6ce8f6Sbellard                              int next_offset)
766fd6ce8f6Sbellard {
767fd6ce8f6Sbellard     TranslationBlock *tb1;
768fd6ce8f6Sbellard     for(;;) {
769fd6ce8f6Sbellard         tb1 = *ptb;
770fd6ce8f6Sbellard         if (tb1 == tb) {
771fd6ce8f6Sbellard             *ptb = *(TranslationBlock **)((char *)tb1 + next_offset);
772fd6ce8f6Sbellard             break;
773fd6ce8f6Sbellard         }
774fd6ce8f6Sbellard         ptb = (TranslationBlock **)((char *)tb1 + next_offset);
775fd6ce8f6Sbellard     }
776fd6ce8f6Sbellard }
777fd6ce8f6Sbellard 
7789fa3e853Sbellard static inline void tb_page_remove(TranslationBlock **ptb, TranslationBlock *tb)
7799fa3e853Sbellard {
7809fa3e853Sbellard     TranslationBlock *tb1;
7819fa3e853Sbellard     unsigned int n1;
7829fa3e853Sbellard 
7839fa3e853Sbellard     for(;;) {
7849fa3e853Sbellard         tb1 = *ptb;
7859fa3e853Sbellard         n1 = (long)tb1 & 3;
7869fa3e853Sbellard         tb1 = (TranslationBlock *)((long)tb1 & ~3);
7879fa3e853Sbellard         if (tb1 == tb) {
7889fa3e853Sbellard             *ptb = tb1->page_next[n1];
7899fa3e853Sbellard             break;
7909fa3e853Sbellard         }
7919fa3e853Sbellard         ptb = &tb1->page_next[n1];
7929fa3e853Sbellard     }
7939fa3e853Sbellard }
7949fa3e853Sbellard 
/* Unlink jump slot 'n' of 'tb' from the circular list of TBs jumping
   to the same destination.  The low two bits of each link select which
   jump slot (0/1) of the pointed-to TB continues the chain; the value
   2 marks the destination TB's own 'jmp_first' list head. */
static inline void tb_jmp_remove(TranslationBlock *tb, int n)
{
    TranslationBlock *tb1, **ptb;
    unsigned int n1;

    ptb = &tb->jmp_next[n];
    tb1 = *ptb;
    if (tb1) {
        /* find tb(n) in circular list */
        for(;;) {
            tb1 = *ptb;
            n1 = (long)tb1 & 3;
            tb1 = (TranslationBlock *)((long)tb1 & ~3);
            if (n1 == n && tb1 == tb)
                break;
            if (n1 == 2) {
                /* tag 2: follow the list head in the destination TB */
                ptb = &tb1->jmp_first;
            } else {
                ptb = &tb1->jmp_next[n1];
            }
        }
        /* now we can suppress tb(n) from the list */
        *ptb = tb->jmp_next[n];

        tb->jmp_next[n] = NULL;
    }
}
822d4e8164fSbellard 
823d4e8164fSbellard /* reset the jump entry 'n' of a TB so that it is not chained to
824d4e8164fSbellard    another TB */
825d4e8164fSbellard static inline void tb_reset_jump(TranslationBlock *tb, int n)
826d4e8164fSbellard {
827d4e8164fSbellard     tb_set_jmp_target(tb, n, (unsigned long)(tb->tc_ptr + tb->tb_next_offset[n]));
828d4e8164fSbellard }
829d4e8164fSbellard 
/* Remove a TB from every data structure that references it: the
   physical-PC hash table, the per-page TB lists, each CPU's jump
   cache, and the incoming/outgoing chained-jump lists.  'page_addr'
   is the page being invalidated (pages equal to it are skipped when
   unlinking, -1 means unlink from both pages). */
void tb_phys_invalidate(TranslationBlock *tb, tb_page_addr_t page_addr)
{
    CPUState *env;
    PageDesc *p;
    unsigned int h, n1;
    tb_page_addr_t phys_pc;
    TranslationBlock *tb1, *tb2;

    /* remove the TB from the hash list */
    phys_pc = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
    h = tb_phys_hash_func(phys_pc);
    tb_remove(&tb_phys_hash[h], tb,
              offsetof(TranslationBlock, phys_hash_next));

    /* remove the TB from the page list */
    if (tb->page_addr[0] != page_addr) {
        p = page_find(tb->page_addr[0] >> TARGET_PAGE_BITS);
        tb_page_remove(&p->first_tb, tb);
        invalidate_page_bitmap(p);
    }
    /* second page only exists (!= -1) when the TB spans two pages */
    if (tb->page_addr[1] != -1 && tb->page_addr[1] != page_addr) {
        p = page_find(tb->page_addr[1] >> TARGET_PAGE_BITS);
        tb_page_remove(&p->first_tb, tb);
        invalidate_page_bitmap(p);
    }

    tb_invalidated_flag = 1;

    /* remove the TB from the hash list */
    h = tb_jmp_cache_hash_func(tb->pc);
    for(env = first_cpu; env != NULL; env = env->next_cpu) {
        if (env->tb_jmp_cache[h] == tb)
            env->tb_jmp_cache[h] = NULL;
    }

    /* suppress this TB from the two jump lists */
    tb_jmp_remove(tb, 0);
    tb_jmp_remove(tb, 1);

    /* suppress any remaining jumps to this TB */
    tb1 = tb->jmp_first;
    for(;;) {
        n1 = (long)tb1 & 3;
        if (n1 == 2)   /* tag 2 marks the end of the incoming-jump list */
            break;
        tb1 = (TranslationBlock *)((long)tb1 & ~3);
        tb2 = tb1->jmp_next[n1];
        tb_reset_jump(tb1, n1);
        tb1->jmp_next[n1] = NULL;
        tb1 = tb2;
    }
    tb->jmp_first = (TranslationBlock *)((long)tb | 2); /* fail safe */

    tb_phys_invalidate_count++;
}
8859fa3e853Sbellard 
/* Set bits [start, start+len) in the bitmap 'tab' (LSB-first within
   each byte).  Handles the head byte, any full middle bytes, and the
   tail byte separately. */
static inline void set_bits(uint8_t *tab, int start, int len)
{
    int end = start + len;
    uint8_t *p = tab + (start >> 3);

    if ((start >> 3) == (end >> 3)) {
        /* the whole range lies inside a single byte */
        if (start < end) {
            *p |= (0xff << (start & 7)) & ~(0xff << (end & 7));
        }
        return;
    }
    /* head: from 'start' to the end of its byte */
    *p++ |= 0xff << (start & 7);
    /* middle: whole bytes */
    for (start = (start + 8) & ~7; start < (end & ~7); start += 8) {
        *p++ = 0xff;
    }
    /* tail: from the last byte boundary up to 'end' */
    if (start < end) {
        *p |= ~(0xff << (end & 7));
    }
}
9129fa3e853Sbellard 
9139fa3e853Sbellard static void build_page_bitmap(PageDesc *p)
9149fa3e853Sbellard {
9159fa3e853Sbellard     int n, tb_start, tb_end;
9169fa3e853Sbellard     TranslationBlock *tb;
9179fa3e853Sbellard 
918b2a7081aSpbrook     p->code_bitmap = qemu_mallocz(TARGET_PAGE_SIZE / 8);
9199fa3e853Sbellard 
9209fa3e853Sbellard     tb = p->first_tb;
9219fa3e853Sbellard     while (tb != NULL) {
9229fa3e853Sbellard         n = (long)tb & 3;
9239fa3e853Sbellard         tb = (TranslationBlock *)((long)tb & ~3);
9249fa3e853Sbellard         /* NOTE: this is subtle as a TB may span two physical pages */
9259fa3e853Sbellard         if (n == 0) {
9269fa3e853Sbellard             /* NOTE: tb_end may be after the end of the page, but
9279fa3e853Sbellard                it is not a problem */
9289fa3e853Sbellard             tb_start = tb->pc & ~TARGET_PAGE_MASK;
9299fa3e853Sbellard             tb_end = tb_start + tb->size;
9309fa3e853Sbellard             if (tb_end > TARGET_PAGE_SIZE)
9319fa3e853Sbellard                 tb_end = TARGET_PAGE_SIZE;
9329fa3e853Sbellard         } else {
9339fa3e853Sbellard             tb_start = 0;
9349fa3e853Sbellard             tb_end = ((tb->pc + tb->size) & ~TARGET_PAGE_MASK);
9359fa3e853Sbellard         }
9369fa3e853Sbellard         set_bits(p->code_bitmap, tb_start, tb_end - tb_start);
9379fa3e853Sbellard         tb = tb->page_next[n];
9389fa3e853Sbellard     }
9399fa3e853Sbellard }
9409fa3e853Sbellard 
/* Translate one guest basic block at (pc, cs_base, flags) into host
   code and register the resulting TB.  Flushes all TBs and retries if
   the TB pool or code buffer is exhausted.  Returns the new TB. */
TranslationBlock *tb_gen_code(CPUState *env,
                              target_ulong pc, target_ulong cs_base,
                              int flags, int cflags)
{
    TranslationBlock *tb;
    uint8_t *tc_ptr;
    tb_page_addr_t phys_pc, phys_page2;
    target_ulong virt_page2;
    int code_gen_size;

    phys_pc = get_page_addr_code(env, pc);
    tb = tb_alloc(pc);
    if (!tb) {
        /* flush must be done */
        tb_flush(env);
        /* cannot fail at this point */
        tb = tb_alloc(pc);
        /* Don't forget to invalidate previous TB info.  */
        tb_invalidated_flag = 1;
    }
    /* generate host code at the current buffer position */
    tc_ptr = code_gen_ptr;
    tb->tc_ptr = tc_ptr;
    tb->cs_base = cs_base;
    tb->flags = flags;
    tb->cflags = cflags;
    cpu_gen_code(env, tb, &code_gen_size);
    /* bump the buffer pointer, rounded up to CODE_GEN_ALIGN */
    code_gen_ptr = (void *)(((unsigned long)code_gen_ptr + code_gen_size + CODE_GEN_ALIGN - 1) & ~(CODE_GEN_ALIGN - 1));

    /* check next page if needed */
    virt_page2 = (pc + tb->size - 1) & TARGET_PAGE_MASK;
    phys_page2 = -1;
    if ((pc & TARGET_PAGE_MASK) != virt_page2) {
        /* the block spans two pages: record the second one too */
        phys_page2 = get_page_addr_code(env, virt_page2);
    }
    tb_link_page(tb, phys_pc, phys_page2);
    return tb;
}
978d720b93dSbellard 
/* invalidate all TBs which intersect with the target physical page
   starting in range [start;end[. NOTE: start and end must refer to
   the same physical page. 'is_cpu_write_access' should be true if called
   from a real cpu write access: the virtual CPU will exit the current
   TB if code is modified inside this TB. */
void tb_invalidate_phys_page_range(tb_page_addr_t start, tb_page_addr_t end,
                                   int is_cpu_write_access)
{
    TranslationBlock *tb, *tb_next, *saved_tb;
    CPUState *env = cpu_single_env;
    tb_page_addr_t tb_start, tb_end;
    PageDesc *p;
    int n;
#ifdef TARGET_HAS_PRECISE_SMC
    /* state used to detect and recover from self-modifying code that
       rewrites the TB currently being executed */
    int current_tb_not_found = is_cpu_write_access;
    TranslationBlock *current_tb = NULL;
    int current_tb_modified = 0;
    target_ulong current_pc = 0;
    target_ulong current_cs_base = 0;
    int current_flags = 0;
#endif /* TARGET_HAS_PRECISE_SMC */

    p = page_find(start >> TARGET_PAGE_BITS);
    if (!p)
        return;
    if (!p->code_bitmap &&
        ++p->code_write_count >= SMC_BITMAP_USE_THRESHOLD &&
        is_cpu_write_access) {
        /* build code bitmap */
        build_page_bitmap(p);
    }

    /* we remove all the TBs in the range [start, end[ */
    /* XXX: see if in some cases it could be faster to invalidate all the code */
    tb = p->first_tb;
    while (tb != NULL) {
        /* low 2 bits of the link tag which page of the TB this list is */
        n = (long)tb & 3;
        tb = (TranslationBlock *)((long)tb & ~3);
        tb_next = tb->page_next[n];
        /* NOTE: this is subtle as a TB may span two physical pages */
        if (n == 0) {
            /* NOTE: tb_end may be after the end of the page, but
               it is not a problem */
            tb_start = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
            tb_end = tb_start + tb->size;
        } else {
            tb_start = tb->page_addr[1];
            tb_end = tb_start + ((tb->pc + tb->size) & ~TARGET_PAGE_MASK);
        }
        if (!(tb_end <= start || tb_start >= end)) {
#ifdef TARGET_HAS_PRECISE_SMC
            if (current_tb_not_found) {
                current_tb_not_found = 0;
                current_tb = NULL;
                if (env->mem_io_pc) {
                    /* now we have a real cpu fault */
                    current_tb = tb_find_pc(env->mem_io_pc);
                }
            }
            if (current_tb == tb &&
                (current_tb->cflags & CF_COUNT_MASK) != 1) {
                /* If we are modifying the current TB, we must stop
                its execution. We could be more precise by checking
                that the modification is after the current PC, but it
                would require a specialized function to partially
                restore the CPU state */

                current_tb_modified = 1;
                cpu_restore_state(current_tb, env,
                                  env->mem_io_pc, NULL);
                cpu_get_tb_cpu_state(env, &current_pc, &current_cs_base,
                                     &current_flags);
            }
#endif /* TARGET_HAS_PRECISE_SMC */
            /* we need to do that to handle the case where a signal
               occurs while doing tb_phys_invalidate() */
            saved_tb = NULL;
            if (env) {
                saved_tb = env->current_tb;
                env->current_tb = NULL;
            }
            tb_phys_invalidate(tb, -1);
            if (env) {
                env->current_tb = saved_tb;
                if (env->interrupt_request && env->current_tb)
                    cpu_interrupt(env, env->interrupt_request);
            }
        }
        tb = tb_next;
    }
#if !defined(CONFIG_USER_ONLY)
    /* if no code remaining, no need to continue to use slow writes */
    if (!p->first_tb) {
        invalidate_page_bitmap(p);
        if (is_cpu_write_access) {
            tlb_unprotect_code_phys(env, start, env->mem_io_vaddr);
        }
    }
#endif
#ifdef TARGET_HAS_PRECISE_SMC
    if (current_tb_modified) {
        /* we generate a block containing just the instruction
           modifying the memory. It will ensure that it cannot modify
           itself */
        env->current_tb = NULL;
        tb_gen_code(env, current_pc, current_cs_base, current_flags, 1);
        cpu_resume_from_signal(env, NULL);
    }
#endif
}
10899fa3e853Sbellard 
10909fa3e853Sbellard /* len must be <= 8 and start must be a multiple of len */
109141c1b1c9SPaul Brook static inline void tb_invalidate_phys_page_fast(tb_page_addr_t start, int len)
10929fa3e853Sbellard {
10939fa3e853Sbellard     PageDesc *p;
10949fa3e853Sbellard     int offset, b;
109559817ccbSbellard #if 0
1096a4193c8aSbellard     if (1) {
109793fcfe39Saliguori         qemu_log("modifying code at 0x%x size=%d EIP=%x PC=%08x\n",
10982e70f6efSpbrook                   cpu_single_env->mem_io_vaddr, len,
1099a4193c8aSbellard                   cpu_single_env->eip,
1100a4193c8aSbellard                   cpu_single_env->eip + (long)cpu_single_env->segs[R_CS].base);
1101a4193c8aSbellard     }
110259817ccbSbellard #endif
11039fa3e853Sbellard     p = page_find(start >> TARGET_PAGE_BITS);
11049fa3e853Sbellard     if (!p)
11059fa3e853Sbellard         return;
11069fa3e853Sbellard     if (p->code_bitmap) {
11079fa3e853Sbellard         offset = start & ~TARGET_PAGE_MASK;
11089fa3e853Sbellard         b = p->code_bitmap[offset >> 3] >> (offset & 7);
11099fa3e853Sbellard         if (b & ((1 << len) - 1))
11109fa3e853Sbellard             goto do_invalidate;
11119fa3e853Sbellard     } else {
11129fa3e853Sbellard     do_invalidate:
1113d720b93dSbellard         tb_invalidate_phys_page_range(start, start + len, 1);
11149fa3e853Sbellard     }
11159fa3e853Sbellard }
11169fa3e853Sbellard 
11179fa3e853Sbellard #if !defined(CONFIG_SOFTMMU)
/* Invalidate every TB on the page containing 'addr' (user-mode slow
   path, called from the write-protection fault handler).  'pc'/'puc'
   describe the faulting host context for precise-SMC recovery. */
static void tb_invalidate_phys_page(tb_page_addr_t addr,
                                    unsigned long pc, void *puc)
{
    TranslationBlock *tb;
    PageDesc *p;
    int n;
#ifdef TARGET_HAS_PRECISE_SMC
    TranslationBlock *current_tb = NULL;
    CPUState *env = cpu_single_env;
    int current_tb_modified = 0;
    target_ulong current_pc = 0;
    target_ulong current_cs_base = 0;
    int current_flags = 0;
#endif

    addr &= TARGET_PAGE_MASK;
    p = page_find(addr >> TARGET_PAGE_BITS);
    if (!p)
        return;
    tb = p->first_tb;
#ifdef TARGET_HAS_PRECISE_SMC
    if (tb && pc != 0) {
        /* pc != 0 means a real fault: find the TB being executed */
        current_tb = tb_find_pc(pc);
    }
#endif
    while (tb != NULL) {
        /* low 2 bits of the link tag which page of the TB this list is */
        n = (long)tb & 3;
        tb = (TranslationBlock *)((long)tb & ~3);
#ifdef TARGET_HAS_PRECISE_SMC
        if (current_tb == tb &&
            (current_tb->cflags & CF_COUNT_MASK) != 1) {
                /* If we are modifying the current TB, we must stop
                   its execution. We could be more precise by checking
                   that the modification is after the current PC, but it
                   would require a specialized function to partially
                   restore the CPU state */

            current_tb_modified = 1;
            cpu_restore_state(current_tb, env, pc, puc);
            cpu_get_tb_cpu_state(env, &current_pc, &current_cs_base,
                                 &current_flags);
        }
#endif /* TARGET_HAS_PRECISE_SMC */
        tb_phys_invalidate(tb, addr);
        tb = tb->page_next[n];
    }
    p->first_tb = NULL;
#ifdef TARGET_HAS_PRECISE_SMC
    if (current_tb_modified) {
        /* we generate a block containing just the instruction
           modifying the memory. It will ensure that it cannot modify
           itself */
        env->current_tb = NULL;
        tb_gen_code(env, current_pc, current_cs_base, current_flags, 1);
        cpu_resume_from_signal(env, puc);
    }
#endif
}
11769fa3e853Sbellard #endif
1177fd6ce8f6Sbellard 
/* add the tb in the target page and protect it if necessary */
/* 'n' selects which of the TB's two page slots 'page_addr' fills; the
   slot index is also encoded in the low bits of the page's list link. */
static inline void tb_alloc_page(TranslationBlock *tb,
                                 unsigned int n, tb_page_addr_t page_addr)
{
    PageDesc *p;
    TranslationBlock *last_first_tb;

    tb->page_addr[n] = page_addr;
    p = page_find_alloc(page_addr >> TARGET_PAGE_BITS, 1);
    /* push onto the page's TB list, tagging the link with the slot */
    tb->page_next[n] = p->first_tb;
    last_first_tb = p->first_tb;
    p->first_tb = (TranslationBlock *)((long)tb | n);
    invalidate_page_bitmap(p);

#if defined(TARGET_HAS_SMC) || 1

#if defined(CONFIG_USER_ONLY)
    if (p->flags & PAGE_WRITE) {
        target_ulong addr;
        PageDesc *p2;
        int prot;

        /* force the host page as non writable (writes will have a
           page fault + mprotect overhead) */
        page_addr &= qemu_host_page_mask;
        prot = 0;
        /* a host page may cover several target pages: collect the
           combined flags and strip PAGE_WRITE from each of them */
        for(addr = page_addr; addr < page_addr + qemu_host_page_size;
            addr += TARGET_PAGE_SIZE) {

            p2 = page_find (addr >> TARGET_PAGE_BITS);
            if (!p2)
                continue;
            prot |= p2->flags;
            p2->flags &= ~PAGE_WRITE;
          }
        mprotect(g2h(page_addr), qemu_host_page_size,
                 (prot & PAGE_BITS) & ~PAGE_WRITE);
#ifdef DEBUG_TB_INVALIDATE
        printf("protecting code page: 0x" TARGET_FMT_lx "\n",
               page_addr);
#endif
    }
#else
    /* if some code is already present, then the pages are already
       protected. So we handle the case where only the first TB is
       allocated in a physical page */
    if (!last_first_tb) {
        tlb_protect_code(page_addr);
    }
#endif

#endif /* TARGET_HAS_SMC */
}
1231fd6ce8f6Sbellard 
1232fd6ce8f6Sbellard /* Allocate a new translation block. Flush the translation buffer if
1233fd6ce8f6Sbellard    too many translation blocks or too much generated code. */
1234c27004ecSbellard TranslationBlock *tb_alloc(target_ulong pc)
1235fd6ce8f6Sbellard {
1236fd6ce8f6Sbellard     TranslationBlock *tb;
1237fd6ce8f6Sbellard 
123826a5f13bSbellard     if (nb_tbs >= code_gen_max_blocks ||
123926a5f13bSbellard         (code_gen_ptr - code_gen_buffer) >= code_gen_buffer_max_size)
1240d4e8164fSbellard         return NULL;
1241fd6ce8f6Sbellard     tb = &tbs[nb_tbs++];
1242fd6ce8f6Sbellard     tb->pc = pc;
1243b448f2f3Sbellard     tb->cflags = 0;
1244d4e8164fSbellard     return tb;
1245d4e8164fSbellard }
1246d4e8164fSbellard 
12472e70f6efSpbrook void tb_free(TranslationBlock *tb)
12482e70f6efSpbrook {
1249bf20dc07Sths     /* In practice this is mostly used for single use temporary TB
12502e70f6efSpbrook        Ignore the hard cases and just back up if this TB happens to
12512e70f6efSpbrook        be the last one generated.  */
12522e70f6efSpbrook     if (nb_tbs > 0 && tb == &tbs[nb_tbs - 1]) {
12532e70f6efSpbrook         code_gen_ptr = tb->tc_ptr;
12542e70f6efSpbrook         nb_tbs--;
12552e70f6efSpbrook     }
12562e70f6efSpbrook }
12572e70f6efSpbrook 
/* add a new TB and link it to the physical page tables. phys_page2 is
   (-1) to indicate that only one page contains the TB. */
void tb_link_page(TranslationBlock *tb,
                  tb_page_addr_t phys_pc, tb_page_addr_t phys_page2)
{
    unsigned int h;
    TranslationBlock **ptb;

    /* Grab the mmap lock to stop another thread invalidating this TB
       before we are done.  */
    mmap_lock();
    /* add in the physical hash table */
    h = tb_phys_hash_func(phys_pc);
    ptb = &tb_phys_hash[h];
    /* prepend to the singly-linked bucket chain */
    tb->phys_hash_next = *ptb;
    *ptb = tb;

    /* add in the page list */
    tb_alloc_page(tb, 0, phys_pc & TARGET_PAGE_MASK);
    if (phys_page2 != -1)
        tb_alloc_page(tb, 1, phys_page2);
    else
        tb->page_addr[1] = -1;

    /* jmp_first points back at this TB; the low bits of the pointer
       carry a tag, 2 marking the head of the jump list */
    tb->jmp_first = (TranslationBlock *)((long)tb | 2);
    tb->jmp_next[0] = NULL;
    tb->jmp_next[1] = NULL;

    /* init original jump addresses */
    if (tb->tb_next_offset[0] != 0xffff)
        tb_reset_jump(tb, 0);
    if (tb->tb_next_offset[1] != 0xffff)
        tb_reset_jump(tb, 1);

#ifdef DEBUG_TB_CHECK
    tb_page_check();
#endif
    mmap_unlock();
}
1297fd6ce8f6Sbellard 
1298a513fe19Sbellard /* find the TB 'tb' such that tb[0].tc_ptr <= tc_ptr <
1299a513fe19Sbellard    tb[1].tc_ptr. Return NULL if not found */
1300a513fe19Sbellard TranslationBlock *tb_find_pc(unsigned long tc_ptr)
1301a513fe19Sbellard {
1302a513fe19Sbellard     int m_min, m_max, m;
1303a513fe19Sbellard     unsigned long v;
1304a513fe19Sbellard     TranslationBlock *tb;
1305a513fe19Sbellard 
1306a513fe19Sbellard     if (nb_tbs <= 0)
1307a513fe19Sbellard         return NULL;
1308a513fe19Sbellard     if (tc_ptr < (unsigned long)code_gen_buffer ||
1309a513fe19Sbellard         tc_ptr >= (unsigned long)code_gen_ptr)
1310a513fe19Sbellard         return NULL;
1311a513fe19Sbellard     /* binary search (cf Knuth) */
1312a513fe19Sbellard     m_min = 0;
1313a513fe19Sbellard     m_max = nb_tbs - 1;
1314a513fe19Sbellard     while (m_min <= m_max) {
1315a513fe19Sbellard         m = (m_min + m_max) >> 1;
1316a513fe19Sbellard         tb = &tbs[m];
1317a513fe19Sbellard         v = (unsigned long)tb->tc_ptr;
1318a513fe19Sbellard         if (v == tc_ptr)
1319a513fe19Sbellard             return tb;
1320a513fe19Sbellard         else if (tc_ptr < v) {
1321a513fe19Sbellard             m_max = m - 1;
1322a513fe19Sbellard         } else {
1323a513fe19Sbellard             m_min = m + 1;
1324a513fe19Sbellard         }
1325a513fe19Sbellard     }
1326a513fe19Sbellard     return &tbs[m_max];
1327a513fe19Sbellard }
13287501267eSbellard 
static void tb_reset_jump_recursive(TranslationBlock *tb);

/* Unchain the n-th outgoing jump of 'tb': locate the jump target by
   walking to the head of the tagged circular list, unlink tb from that
   target's incoming-jump list, patch the generated code via
   tb_reset_jump(), and recurse into the target.  Low pointer bits carry
   a tag: 0/1 select which jmp_next slot an entry occupies, 2 marks the
   list head (the target TB itself). */
static inline void tb_reset_jump_recursive2(TranslationBlock *tb, int n)
{
    TranslationBlock *tb1, *tb_next, **ptb;
    unsigned int n1;

    tb1 = tb->jmp_next[n];
    if (tb1 != NULL) {
        /* find head of list */
        for(;;) {
            n1 = (long)tb1 & 3;
            tb1 = (TranslationBlock *)((long)tb1 & ~3);
            if (n1 == 2)
                break;
            tb1 = tb1->jmp_next[n1];
        }
        /* we are now sure now that tb jumps to tb1 */
        tb_next = tb1;

        /* remove tb from the jmp_first list */
        ptb = &tb_next->jmp_first;
        for(;;) {
            tb1 = *ptb;
            n1 = (long)tb1 & 3;
            tb1 = (TranslationBlock *)((long)tb1 & ~3);
            if (n1 == n && tb1 == tb)
                break;
            ptb = &tb1->jmp_next[n1];
        }
        *ptb = tb->jmp_next[n];
        tb->jmp_next[n] = NULL;

        /* suppress the jump to next tb in generated code */
        tb_reset_jump(tb, n);

        /* suppress jumps in the tb on which we could have jumped */
        tb_reset_jump_recursive(tb_next);
    }
}
1369ea041c0eSbellard 
1370ea041c0eSbellard static void tb_reset_jump_recursive(TranslationBlock *tb)
1371ea041c0eSbellard {
1372ea041c0eSbellard     tb_reset_jump_recursive2(tb, 0);
1373ea041c0eSbellard     tb_reset_jump_recursive2(tb, 1);
1374ea041c0eSbellard }
1375ea041c0eSbellard 
#if defined(TARGET_HAS_ICE)
#if defined(CONFIG_USER_ONLY)
/* Invalidate any TB containing 'pc' so an inserted/removed breakpoint
   takes effect.  In user mode pc is used directly as the page address. */
static void breakpoint_invalidate(CPUState *env, target_ulong pc)
{
    tb_invalidate_phys_page_range(pc, pc + 1, 0);
}
#else
/* System emulation: translate the virtual pc to a ram address first,
   falling back to IO_MEM_UNASSIGNED when no page descriptor exists. */
static void breakpoint_invalidate(CPUState *env, target_ulong pc)
{
    target_phys_addr_t addr;
    target_ulong pd;
    ram_addr_t ram_addr;
    PhysPageDesc *p;

    addr = cpu_get_phys_page_debug(env, pc);
    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    if (!p) {
        pd = IO_MEM_UNASSIGNED;
    } else {
        pd = p->phys_offset;
    }
    /* combine the page's ram offset with the sub-page offset of pc */
    ram_addr = (pd & TARGET_PAGE_MASK) | (pc & ~TARGET_PAGE_MASK);
    tb_invalidate_phys_page_range(ram_addr, ram_addr + 1, 0);
}
#endif
#endif /* TARGET_HAS_ICE */
1402d720b93dSbellard 
1403c527ee8fSPaul Brook #if defined(CONFIG_USER_ONLY)
1404c527ee8fSPaul Brook void cpu_watchpoint_remove_all(CPUState *env, int mask)
1405c527ee8fSPaul Brook 
1406c527ee8fSPaul Brook {
1407c527ee8fSPaul Brook }
1408c527ee8fSPaul Brook 
1409c527ee8fSPaul Brook int cpu_watchpoint_insert(CPUState *env, target_ulong addr, target_ulong len,
1410c527ee8fSPaul Brook                           int flags, CPUWatchpoint **watchpoint)
1411c527ee8fSPaul Brook {
1412c527ee8fSPaul Brook     return -ENOSYS;
1413c527ee8fSPaul Brook }
1414c527ee8fSPaul Brook #else
14156658ffb8Spbrook /* Add a watchpoint.  */
1416a1d1bb31Saliguori int cpu_watchpoint_insert(CPUState *env, target_ulong addr, target_ulong len,
1417a1d1bb31Saliguori                           int flags, CPUWatchpoint **watchpoint)
14186658ffb8Spbrook {
1419b4051334Saliguori     target_ulong len_mask = ~(len - 1);
1420c0ce998eSaliguori     CPUWatchpoint *wp;
14216658ffb8Spbrook 
1422b4051334Saliguori     /* sanity checks: allow power-of-2 lengths, deny unaligned watchpoints */
1423b4051334Saliguori     if ((len != 1 && len != 2 && len != 4 && len != 8) || (addr & ~len_mask)) {
1424b4051334Saliguori         fprintf(stderr, "qemu: tried to set invalid watchpoint at "
1425b4051334Saliguori                 TARGET_FMT_lx ", len=" TARGET_FMT_lu "\n", addr, len);
1426b4051334Saliguori         return -EINVAL;
1427b4051334Saliguori     }
1428a1d1bb31Saliguori     wp = qemu_malloc(sizeof(*wp));
14296658ffb8Spbrook 
1430a1d1bb31Saliguori     wp->vaddr = addr;
1431b4051334Saliguori     wp->len_mask = len_mask;
1432a1d1bb31Saliguori     wp->flags = flags;
1433a1d1bb31Saliguori 
14342dc9f411Saliguori     /* keep all GDB-injected watchpoints in front */
1435c0ce998eSaliguori     if (flags & BP_GDB)
143672cf2d4fSBlue Swirl         QTAILQ_INSERT_HEAD(&env->watchpoints, wp, entry);
1437c0ce998eSaliguori     else
143872cf2d4fSBlue Swirl         QTAILQ_INSERT_TAIL(&env->watchpoints, wp, entry);
1439a1d1bb31Saliguori 
14406658ffb8Spbrook     tlb_flush_page(env, addr);
1441a1d1bb31Saliguori 
1442a1d1bb31Saliguori     if (watchpoint)
1443a1d1bb31Saliguori         *watchpoint = wp;
1444a1d1bb31Saliguori     return 0;
14456658ffb8Spbrook }
14466658ffb8Spbrook 
1447a1d1bb31Saliguori /* Remove a specific watchpoint.  */
1448a1d1bb31Saliguori int cpu_watchpoint_remove(CPUState *env, target_ulong addr, target_ulong len,
1449a1d1bb31Saliguori                           int flags)
14506658ffb8Spbrook {
1451b4051334Saliguori     target_ulong len_mask = ~(len - 1);
1452a1d1bb31Saliguori     CPUWatchpoint *wp;
14536658ffb8Spbrook 
145472cf2d4fSBlue Swirl     QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
1455b4051334Saliguori         if (addr == wp->vaddr && len_mask == wp->len_mask
14566e140f28Saliguori                 && flags == (wp->flags & ~BP_WATCHPOINT_HIT)) {
1457a1d1bb31Saliguori             cpu_watchpoint_remove_by_ref(env, wp);
14586658ffb8Spbrook             return 0;
14596658ffb8Spbrook         }
14606658ffb8Spbrook     }
1461a1d1bb31Saliguori     return -ENOENT;
14626658ffb8Spbrook }
14636658ffb8Spbrook 
/* Remove a specific watchpoint by reference.  */
void cpu_watchpoint_remove_by_ref(CPUState *env, CPUWatchpoint *watchpoint)
{
    QTAILQ_REMOVE(&env->watchpoints, watchpoint, entry);

    /* flush the watched page's TLB entry, mirroring the flush done on
       insert, so it is rebuilt without the watchpoint */
    tlb_flush_page(env, watchpoint->vaddr);

    qemu_free(watchpoint);
}
14737d03f82fSedgar_igl 
1474a1d1bb31Saliguori /* Remove all matching watchpoints.  */
1475a1d1bb31Saliguori void cpu_watchpoint_remove_all(CPUState *env, int mask)
1476a1d1bb31Saliguori {
1477c0ce998eSaliguori     CPUWatchpoint *wp, *next;
1478a1d1bb31Saliguori 
147972cf2d4fSBlue Swirl     QTAILQ_FOREACH_SAFE(wp, &env->watchpoints, entry, next) {
1480a1d1bb31Saliguori         if (wp->flags & mask)
1481a1d1bb31Saliguori             cpu_watchpoint_remove_by_ref(env, wp);
1482a1d1bb31Saliguori     }
1483c0ce998eSaliguori }
1484c527ee8fSPaul Brook #endif
1485a1d1bb31Saliguori 
1486a1d1bb31Saliguori /* Add a breakpoint.  */
1487a1d1bb31Saliguori int cpu_breakpoint_insert(CPUState *env, target_ulong pc, int flags,
1488a1d1bb31Saliguori                           CPUBreakpoint **breakpoint)
14894c3a88a2Sbellard {
14901fddef4bSbellard #if defined(TARGET_HAS_ICE)
1491c0ce998eSaliguori     CPUBreakpoint *bp;
14924c3a88a2Sbellard 
1493a1d1bb31Saliguori     bp = qemu_malloc(sizeof(*bp));
14944c3a88a2Sbellard 
1495a1d1bb31Saliguori     bp->pc = pc;
1496a1d1bb31Saliguori     bp->flags = flags;
1497a1d1bb31Saliguori 
14982dc9f411Saliguori     /* keep all GDB-injected breakpoints in front */
1499c0ce998eSaliguori     if (flags & BP_GDB)
150072cf2d4fSBlue Swirl         QTAILQ_INSERT_HEAD(&env->breakpoints, bp, entry);
1501c0ce998eSaliguori     else
150272cf2d4fSBlue Swirl         QTAILQ_INSERT_TAIL(&env->breakpoints, bp, entry);
1503d720b93dSbellard 
1504d720b93dSbellard     breakpoint_invalidate(env, pc);
1505a1d1bb31Saliguori 
1506a1d1bb31Saliguori     if (breakpoint)
1507a1d1bb31Saliguori         *breakpoint = bp;
15084c3a88a2Sbellard     return 0;
15094c3a88a2Sbellard #else
1510a1d1bb31Saliguori     return -ENOSYS;
15114c3a88a2Sbellard #endif
15124c3a88a2Sbellard }
15134c3a88a2Sbellard 
1514a1d1bb31Saliguori /* Remove a specific breakpoint.  */
1515a1d1bb31Saliguori int cpu_breakpoint_remove(CPUState *env, target_ulong pc, int flags)
1516a1d1bb31Saliguori {
15177d03f82fSedgar_igl #if defined(TARGET_HAS_ICE)
1518a1d1bb31Saliguori     CPUBreakpoint *bp;
1519a1d1bb31Saliguori 
152072cf2d4fSBlue Swirl     QTAILQ_FOREACH(bp, &env->breakpoints, entry) {
1521a1d1bb31Saliguori         if (bp->pc == pc && bp->flags == flags) {
1522a1d1bb31Saliguori             cpu_breakpoint_remove_by_ref(env, bp);
1523a1d1bb31Saliguori             return 0;
15247d03f82fSedgar_igl         }
1525a1d1bb31Saliguori     }
1526a1d1bb31Saliguori     return -ENOENT;
1527a1d1bb31Saliguori #else
1528a1d1bb31Saliguori     return -ENOSYS;
15297d03f82fSedgar_igl #endif
15307d03f82fSedgar_igl }
15317d03f82fSedgar_igl 
/* Remove a specific breakpoint by reference.  */
void cpu_breakpoint_remove_by_ref(CPUState *env, CPUBreakpoint *breakpoint)
{
#if defined(TARGET_HAS_ICE)
    QTAILQ_REMOVE(&env->breakpoints, breakpoint, entry);

    /* invalidate any TB that was translated with the breakpoint present */
    breakpoint_invalidate(env, breakpoint->pc);

    qemu_free(breakpoint);
#endif
}
1543a1d1bb31Saliguori 
1544a1d1bb31Saliguori /* Remove all matching breakpoints. */
1545a1d1bb31Saliguori void cpu_breakpoint_remove_all(CPUState *env, int mask)
1546a1d1bb31Saliguori {
1547a1d1bb31Saliguori #if defined(TARGET_HAS_ICE)
1548c0ce998eSaliguori     CPUBreakpoint *bp, *next;
1549a1d1bb31Saliguori 
155072cf2d4fSBlue Swirl     QTAILQ_FOREACH_SAFE(bp, &env->breakpoints, entry, next) {
1551a1d1bb31Saliguori         if (bp->flags & mask)
1552a1d1bb31Saliguori             cpu_breakpoint_remove_by_ref(env, bp);
1553c0ce998eSaliguori     }
15544c3a88a2Sbellard #endif
15554c3a88a2Sbellard }
15564c3a88a2Sbellard 
1557c33a346eSbellard /* enable or disable single step mode. EXCP_DEBUG is returned by the
1558c33a346eSbellard    CPU loop after each instruction */
1559c33a346eSbellard void cpu_single_step(CPUState *env, int enabled)
1560c33a346eSbellard {
15611fddef4bSbellard #if defined(TARGET_HAS_ICE)
1562c33a346eSbellard     if (env->singlestep_enabled != enabled) {
1563c33a346eSbellard         env->singlestep_enabled = enabled;
1564e22a25c9Saliguori         if (kvm_enabled())
1565e22a25c9Saliguori             kvm_update_guest_debug(env, 0);
1566e22a25c9Saliguori         else {
1567ccbb4d44SStuart Brady             /* must flush all the translated code to avoid inconsistencies */
15689fa3e853Sbellard             /* XXX: only flush what is necessary */
15690124311eSbellard             tb_flush(env);
1570c33a346eSbellard         }
1571e22a25c9Saliguori     }
1572c33a346eSbellard #endif
1573c33a346eSbellard }
1574c33a346eSbellard 
/* enable or disable low levels log */
void cpu_set_log(int log_flags)
{
    loglevel = log_flags;
    if (loglevel && !logfile) {
        /* first open truncates ("w"); once log_append is set below,
           subsequent reopens append so earlier output is kept */
        logfile = fopen(logfilename, log_append ? "a" : "w");
        if (!logfile) {
            perror(logfilename);
            _exit(1);
        }
#if !defined(CONFIG_SOFTMMU)
        /* must avoid mmap() usage of glibc by setting a buffer "by hand" */
        {
            static char logfile_buf[4096];
            setvbuf(logfile, logfile_buf, _IOLBF, sizeof(logfile_buf));
        }
#elif !defined(_WIN32)
        /* Win32 doesn't support line-buffering and requires size >= 2 */
        setvbuf(logfile, NULL, _IOLBF, 0);
#endif
        log_append = 1;
    }
    if (!loglevel && logfile) {
        /* logging disabled: close and forget the file */
        fclose(logfile);
        logfile = NULL;
    }
}
160234865134Sbellard 
/* Switch logging to a new file name; reopens the log if it was open. */
void cpu_set_log_filename(const char *filename)
{
    /* NOTE(review): the previous logfilename is not freed -- repeated
       calls leak the strdup'd copies.  Freeing unconditionally would be
       unsafe if the default name is a static string; verify before
       changing. */
    logfilename = strdup(filename);
    if (logfile) {
        fclose(logfile);
        logfile = NULL;
    }
    /* re-run cpu_set_log so the file is reopened under the new name */
    cpu_set_log(loglevel);
}
1612c33a346eSbellard 
/* Stop the CPU from continuing in its current TB: clear current_tb and
   break every chained jump leading out of it, so execution drops back
   to the outer execution loop. */
static void cpu_unlink_tb(CPUState *env)
{
    /* FIXME: TB unchaining isn't SMP safe.  For now just ignore the
       problem and hope the cpu will stop of its own accord.  For userspace
       emulation this often isn't actually as bad as it sounds.  Often
       signals are used primarily to interrupt blocking syscalls.  */
    TranslationBlock *tb;
    /* serializes concurrent unlink attempts across callers */
    static spinlock_t interrupt_lock = SPIN_LOCK_UNLOCKED;

    spin_lock(&interrupt_lock);
    tb = env->current_tb;
    /* if the cpu is currently executing code, we must unlink it and
       all the potentially executing TB */
    if (tb) {
        env->current_tb = NULL;
        tb_reset_jump_recursive(tb);
    }
    spin_unlock(&interrupt_lock);
}
16323098dba0Saurel32 
/* mask must never be zero, except for A20 change call */
void cpu_interrupt(CPUState *env, int mask)
{
    int old_mask;

    old_mask = env->interrupt_request;
    env->interrupt_request |= mask;

#ifndef CONFIG_USER_ONLY
    /*
     * If called from iothread context, wake the target cpu in
     * case its halted.
     */
    if (!qemu_cpu_self(env)) {
        qemu_cpu_kick(env);
        return;
    }
#endif

    if (use_icount) {
        /* saturate the high half of the icount decrementer --
           presumably makes the running TB stop at its next icount
           check; TODO confirm against the cpu-exec icount path */
        env->icount_decr.u16.high = 0xffff;
#ifndef CONFIG_USER_ONLY
        /* raising a NEW interrupt bit outside an I/O instruction is
           fatal under icount */
        if (!can_do_io(env)
            && (mask & ~old_mask) != 0) {
            cpu_abort(env, "Raised interrupt while not in I/O function");
        }
#endif
    } else {
        /* break TB chaining so the pending interrupt is noticed */
        cpu_unlink_tb(env);
    }
}
1664ea041c0eSbellard 
1665b54ad049Sbellard void cpu_reset_interrupt(CPUState *env, int mask)
1666b54ad049Sbellard {
1667b54ad049Sbellard     env->interrupt_request &= ~mask;
1668b54ad049Sbellard }
1669b54ad049Sbellard 
/* Request that the CPU leave its execution loop as soon as possible. */
void cpu_exit(CPUState *env)
{
    env->exit_request = 1;
    /* unchain the current TB so the request is noticed promptly */
    cpu_unlink_tb(env);
}
16753098dba0Saurel32 
/* Table of supported log categories: mask bit, command-line name, and
   help text.  Parsed by cpu_str_to_log_mask(); terminated by the
   zero-mask entry. */
const CPULogItem cpu_log_items[] = {
    { CPU_LOG_TB_OUT_ASM, "out_asm",
      "show generated host assembly code for each compiled TB" },
    { CPU_LOG_TB_IN_ASM, "in_asm",
      "show target assembly code for each compiled TB" },
    { CPU_LOG_TB_OP, "op",
      "show micro ops for each compiled TB" },
    { CPU_LOG_TB_OP_OPT, "op_opt",
      "show micro ops "
#ifdef TARGET_I386
      "before eflags optimization and "
#endif
      "after liveness analysis" },
    { CPU_LOG_INT, "int",
      "show interrupts/exceptions in short format" },
    { CPU_LOG_EXEC, "exec",
      "show trace before each executed TB (lots of logs)" },
    { CPU_LOG_TB_CPU, "cpu",
      "show CPU state before block translation" },
#ifdef TARGET_I386
    { CPU_LOG_PCALL, "pcall",
      "show protected mode far calls/returns/exceptions" },
    { CPU_LOG_RESET, "cpu_reset",
      "show CPU state before CPU resets" },
#endif
#ifdef DEBUG_IOPORT
    { CPU_LOG_IOPORT, "ioport",
      "show all i/o ports accesses" },
#endif
    { 0, NULL, NULL },  /* terminator */
};
1707f193c797Sbellard 
1708f6f3fbcaSMichael S. Tsirkin #ifndef CONFIG_USER_ONLY
/* Clients notified of changes to the physical memory map. */
static QLIST_HEAD(memory_client_list, CPUPhysMemoryClient) memory_client_list
    = QLIST_HEAD_INITIALIZER(memory_client_list);
1711f6f3fbcaSMichael S. Tsirkin 
1712f6f3fbcaSMichael S. Tsirkin static void cpu_notify_set_memory(target_phys_addr_t start_addr,
1713f6f3fbcaSMichael S. Tsirkin 				  ram_addr_t size,
1714f6f3fbcaSMichael S. Tsirkin 				  ram_addr_t phys_offset)
1715f6f3fbcaSMichael S. Tsirkin {
1716f6f3fbcaSMichael S. Tsirkin     CPUPhysMemoryClient *client;
1717f6f3fbcaSMichael S. Tsirkin     QLIST_FOREACH(client, &memory_client_list, list) {
1718f6f3fbcaSMichael S. Tsirkin         client->set_memory(client, start_addr, size, phys_offset);
1719f6f3fbcaSMichael S. Tsirkin     }
1720f6f3fbcaSMichael S. Tsirkin }
1721f6f3fbcaSMichael S. Tsirkin 
1722f6f3fbcaSMichael S. Tsirkin static int cpu_notify_sync_dirty_bitmap(target_phys_addr_t start,
1723f6f3fbcaSMichael S. Tsirkin 					target_phys_addr_t end)
1724f6f3fbcaSMichael S. Tsirkin {
1725f6f3fbcaSMichael S. Tsirkin     CPUPhysMemoryClient *client;
1726f6f3fbcaSMichael S. Tsirkin     QLIST_FOREACH(client, &memory_client_list, list) {
1727f6f3fbcaSMichael S. Tsirkin         int r = client->sync_dirty_bitmap(client, start, end);
1728f6f3fbcaSMichael S. Tsirkin         if (r < 0)
1729f6f3fbcaSMichael S. Tsirkin             return r;
1730f6f3fbcaSMichael S. Tsirkin     }
1731f6f3fbcaSMichael S. Tsirkin     return 0;
1732f6f3fbcaSMichael S. Tsirkin }
1733f6f3fbcaSMichael S. Tsirkin 
1734f6f3fbcaSMichael S. Tsirkin static int cpu_notify_migration_log(int enable)
1735f6f3fbcaSMichael S. Tsirkin {
1736f6f3fbcaSMichael S. Tsirkin     CPUPhysMemoryClient *client;
1737f6f3fbcaSMichael S. Tsirkin     QLIST_FOREACH(client, &memory_client_list, list) {
1738f6f3fbcaSMichael S. Tsirkin         int r = client->migration_log(client, enable);
1739f6f3fbcaSMichael S. Tsirkin         if (r < 0)
1740f6f3fbcaSMichael S. Tsirkin             return r;
1741f6f3fbcaSMichael S. Tsirkin     }
1742f6f3fbcaSMichael S. Tsirkin     return 0;
1743f6f3fbcaSMichael S. Tsirkin }
1744f6f3fbcaSMichael S. Tsirkin 
/* Recursively walk one subtree of the physical page table, invoking
   the client's set_memory callback for every mapped page.
   NOTE(review): at level 0 this passes pd[i].region_offset as the
   callback's start_addr, but no page-table address is threaded through
   the recursion -- it looks like the page's physical address was
   intended; verify against CPUPhysMemoryClient implementations before
   changing (the fix needs an extra address parameter). */
static void phys_page_for_each_1(CPUPhysMemoryClient *client,
                                 int level, void **lp)
{
    int i;

    /* empty subtree: nothing mapped below this entry */
    if (*lp == NULL) {
        return;
    }
    if (level == 0) {
        PhysPageDesc *pd = *lp;
        for (i = 0; i < L2_SIZE; ++i) {
            if (pd[i].phys_offset != IO_MEM_UNASSIGNED) {
                client->set_memory(client, pd[i].region_offset,
                                   TARGET_PAGE_SIZE, pd[i].phys_offset);
            }
        }
    } else {
        void **pp = *lp;
        for (i = 0; i < L2_SIZE; ++i) {
            phys_page_for_each_1(client, level - 1, pp + i);
        }
    }
}
1768f6f3fbcaSMichael S. Tsirkin 
1769f6f3fbcaSMichael S. Tsirkin static void phys_page_for_each(CPUPhysMemoryClient *client)
1770f6f3fbcaSMichael S. Tsirkin {
17715cd2c5b6SRichard Henderson     int i;
17725cd2c5b6SRichard Henderson     for (i = 0; i < P_L1_SIZE; ++i) {
17735cd2c5b6SRichard Henderson         phys_page_for_each_1(client, P_L1_SHIFT / L2_BITS - 1,
17745cd2c5b6SRichard Henderson                              l1_phys_map + 1);
1775f6f3fbcaSMichael S. Tsirkin     }
1776f6f3fbcaSMichael S. Tsirkin }
1777f6f3fbcaSMichael S. Tsirkin 
/* Register a memory client and immediately replay all existing
   physical mappings to it. */
void cpu_register_phys_memory_client(CPUPhysMemoryClient *client)
{
    QLIST_INSERT_HEAD(&memory_client_list, client, list);
    phys_page_for_each(client);
}
1783f6f3fbcaSMichael S. Tsirkin 
/* Unregister a previously registered physical-memory client. */
void cpu_unregister_phys_memory_client(CPUPhysMemoryClient *client)
{
    QLIST_REMOVE(client, list);
}
1788f6f3fbcaSMichael S. Tsirkin #endif
1789f6f3fbcaSMichael S. Tsirkin 
/* Return non-zero iff s2 has length n and matches the first n bytes
   of s1 (used to compare an unterminated token against a name). */
static int cmp1(const char *s1, int n, const char *s2)
{
    return strlen(s2) == (size_t)n && memcmp(s1, s2, n) == 0;
}
1796f193c797Sbellard 
1797f193c797Sbellard /* takes a comma separated list of log masks. Return 0 if error. */
1798f193c797Sbellard int cpu_str_to_log_mask(const char *str)
1799f193c797Sbellard {
1800c7cd6a37Sblueswir1     const CPULogItem *item;
1801f193c797Sbellard     int mask;
1802f193c797Sbellard     const char *p, *p1;
1803f193c797Sbellard 
1804f193c797Sbellard     p = str;
1805f193c797Sbellard     mask = 0;
1806f193c797Sbellard     for(;;) {
1807f193c797Sbellard         p1 = strchr(p, ',');
1808f193c797Sbellard         if (!p1)
1809f193c797Sbellard             p1 = p + strlen(p);
18108e3a9fd2Sbellard 	if(cmp1(p,p1-p,"all")) {
18118e3a9fd2Sbellard 		for(item = cpu_log_items; item->mask != 0; item++) {
18128e3a9fd2Sbellard 			mask |= item->mask;
18138e3a9fd2Sbellard 		}
18148e3a9fd2Sbellard 	} else {
1815f193c797Sbellard         for(item = cpu_log_items; item->mask != 0; item++) {
1816f193c797Sbellard             if (cmp1(p, p1 - p, item->name))
1817f193c797Sbellard                 goto found;
1818f193c797Sbellard         }
1819f193c797Sbellard         return 0;
18208e3a9fd2Sbellard 	}
1821f193c797Sbellard     found:
1822f193c797Sbellard         mask |= item->mask;
1823f193c797Sbellard         if (*p1 != ',')
1824f193c797Sbellard             break;
1825f193c797Sbellard         p = p1 + 1;
1826f193c797Sbellard     }
1827f193c797Sbellard     return mask;
1828f193c797Sbellard }
1829ea041c0eSbellard 
/* Report a fatal emulator error: print the message and CPU state to
   stderr (and to the log file when enabled), then abort().  Never
   returns. */
void cpu_abort(CPUState *env, const char *fmt, ...)
{
    va_list ap;
    va_list ap2;

    va_start(ap, fmt);
    /* a second va_list is needed because ap is consumed by the stderr
       vfprintf below */
    va_copy(ap2, ap);
    fprintf(stderr, "qemu: fatal: ");
    vfprintf(stderr, fmt, ap);
    fprintf(stderr, "\n");
#ifdef TARGET_I386
    cpu_dump_state(env, stderr, fprintf, X86_DUMP_FPU | X86_DUMP_CCOP);
#else
    cpu_dump_state(env, stderr, fprintf, 0);
#endif
    if (qemu_log_enabled()) {
        qemu_log("qemu: fatal: ");
        qemu_log_vprintf(fmt, ap2);
        qemu_log("\n");
#ifdef TARGET_I386
        log_cpu_state(env, X86_DUMP_FPU | X86_DUMP_CCOP);
#else
        log_cpu_state(env, 0);
#endif
        qemu_log_flush();
        qemu_log_close();
    }
    va_end(ap2);
    va_end(ap);
#if defined(CONFIG_USER_ONLY)
    {
        /* restore the default SIGABRT disposition -- presumably so the
           abort() below is not intercepted by a guest-installed handler */
        struct sigaction act;
        sigfillset(&act.sa_mask);
        act.sa_handler = SIG_DFL;
        sigaction(SIGABRT, &act, NULL);
    }
#endif
    abort();
}
18697501267eSbellard 
1870c5be9f08Sths CPUState *cpu_copy(CPUState *env)
1871c5be9f08Sths {
187201ba9816Sths     CPUState *new_env = cpu_init(env->cpu_model_str);
1873c5be9f08Sths     CPUState *next_cpu = new_env->next_cpu;
1874c5be9f08Sths     int cpu_index = new_env->cpu_index;
18755a38f081Saliguori #if defined(TARGET_HAS_ICE)
18765a38f081Saliguori     CPUBreakpoint *bp;
18775a38f081Saliguori     CPUWatchpoint *wp;
18785a38f081Saliguori #endif
18795a38f081Saliguori 
1880c5be9f08Sths     memcpy(new_env, env, sizeof(CPUState));
18815a38f081Saliguori 
18825a38f081Saliguori     /* Preserve chaining and index. */
1883c5be9f08Sths     new_env->next_cpu = next_cpu;
1884c5be9f08Sths     new_env->cpu_index = cpu_index;
18855a38f081Saliguori 
18865a38f081Saliguori     /* Clone all break/watchpoints.
18875a38f081Saliguori        Note: Once we support ptrace with hw-debug register access, make sure
18885a38f081Saliguori        BP_CPU break/watchpoints are handled correctly on clone. */
188972cf2d4fSBlue Swirl     QTAILQ_INIT(&env->breakpoints);
189072cf2d4fSBlue Swirl     QTAILQ_INIT(&env->watchpoints);
18915a38f081Saliguori #if defined(TARGET_HAS_ICE)
189272cf2d4fSBlue Swirl     QTAILQ_FOREACH(bp, &env->breakpoints, entry) {
18935a38f081Saliguori         cpu_breakpoint_insert(new_env, bp->pc, bp->flags, NULL);
18945a38f081Saliguori     }
189572cf2d4fSBlue Swirl     QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
18965a38f081Saliguori         cpu_watchpoint_insert(new_env, wp->vaddr, (~wp->len_mask) + 1,
18975a38f081Saliguori                               wp->flags, NULL);
18985a38f081Saliguori     }
18995a38f081Saliguori #endif
19005a38f081Saliguori 
1901c5be9f08Sths     return new_env;
1902c5be9f08Sths }
1903c5be9f08Sths 
19040124311eSbellard #if !defined(CONFIG_USER_ONLY)
19050124311eSbellard 
19065c751e99Sedgar_igl static inline void tlb_flush_jmp_cache(CPUState *env, target_ulong addr)
19075c751e99Sedgar_igl {
19085c751e99Sedgar_igl     unsigned int i;
19095c751e99Sedgar_igl 
19105c751e99Sedgar_igl     /* Discard jump cache entries for any tb which might potentially
19115c751e99Sedgar_igl        overlap the flushed page.  */
19125c751e99Sedgar_igl     i = tb_jmp_cache_hash_page(addr - TARGET_PAGE_SIZE);
19135c751e99Sedgar_igl     memset (&env->tb_jmp_cache[i], 0,
19145c751e99Sedgar_igl 	    TB_JMP_PAGE_SIZE * sizeof(TranslationBlock *));
19155c751e99Sedgar_igl 
19165c751e99Sedgar_igl     i = tb_jmp_cache_hash_page(addr);
19175c751e99Sedgar_igl     memset (&env->tb_jmp_cache[i], 0,
19185c751e99Sedgar_igl 	    TB_JMP_PAGE_SIZE * sizeof(TranslationBlock *));
19195c751e99Sedgar_igl }
19205c751e99Sedgar_igl 
/* Template for an invalid TLB entry: every address field holds -1 so
   no page-aligned guest address can ever match it (the low bits keep
   TLB_INVALID_MASK set), forcing a slow-path refill on access. */
static CPUTLBEntry s_cputlb_empty_entry = {
    .addr_read  = -1,
    .addr_write = -1,
    .addr_code  = -1,
    .addend     = -1,
};
192708738984SIgor Kovalenko 
1928ee8b7021Sbellard /* NOTE: if flush_global is true, also flush global entries (not
1929ee8b7021Sbellard    implemented yet) */
1930ee8b7021Sbellard void tlb_flush(CPUState *env, int flush_global)
193133417e70Sbellard {
193233417e70Sbellard     int i;
19330124311eSbellard 
19349fa3e853Sbellard #if defined(DEBUG_TLB)
19359fa3e853Sbellard     printf("tlb_flush:\n");
19369fa3e853Sbellard #endif
19370124311eSbellard     /* must reset current TB so that interrupts cannot modify the
19380124311eSbellard        links while we are modifying them */
19390124311eSbellard     env->current_tb = NULL;
19400124311eSbellard 
194133417e70Sbellard     for(i = 0; i < CPU_TLB_SIZE; i++) {
1942cfde4bd9SIsaku Yamahata         int mmu_idx;
1943cfde4bd9SIsaku Yamahata         for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) {
194408738984SIgor Kovalenko             env->tlb_table[mmu_idx][i] = s_cputlb_empty_entry;
1945cfde4bd9SIsaku Yamahata         }
194633417e70Sbellard     }
19479fa3e853Sbellard 
19488a40a180Sbellard     memset (env->tb_jmp_cache, 0, TB_JMP_CACHE_SIZE * sizeof (void *));
19499fa3e853Sbellard 
1950d4c430a8SPaul Brook     env->tlb_flush_addr = -1;
1951d4c430a8SPaul Brook     env->tlb_flush_mask = 0;
1952e3db7226Sbellard     tlb_flush_count++;
195333417e70Sbellard }
195433417e70Sbellard 
1955274da6b2Sbellard static inline void tlb_flush_entry(CPUTLBEntry *tlb_entry, target_ulong addr)
195661382a50Sbellard {
195784b7b8e7Sbellard     if (addr == (tlb_entry->addr_read &
195884b7b8e7Sbellard                  (TARGET_PAGE_MASK | TLB_INVALID_MASK)) ||
195984b7b8e7Sbellard         addr == (tlb_entry->addr_write &
196084b7b8e7Sbellard                  (TARGET_PAGE_MASK | TLB_INVALID_MASK)) ||
196184b7b8e7Sbellard         addr == (tlb_entry->addr_code &
196284b7b8e7Sbellard                  (TARGET_PAGE_MASK | TLB_INVALID_MASK))) {
196308738984SIgor Kovalenko         *tlb_entry = s_cputlb_empty_entry;
196484b7b8e7Sbellard     }
196561382a50Sbellard }
196661382a50Sbellard 
19672e12669aSbellard void tlb_flush_page(CPUState *env, target_ulong addr)
196833417e70Sbellard {
19698a40a180Sbellard     int i;
1970cfde4bd9SIsaku Yamahata     int mmu_idx;
19710124311eSbellard 
19729fa3e853Sbellard #if defined(DEBUG_TLB)
1973108c49b8Sbellard     printf("tlb_flush_page: " TARGET_FMT_lx "\n", addr);
19749fa3e853Sbellard #endif
1975d4c430a8SPaul Brook     /* Check if we need to flush due to large pages.  */
1976d4c430a8SPaul Brook     if ((addr & env->tlb_flush_mask) == env->tlb_flush_addr) {
1977d4c430a8SPaul Brook #if defined(DEBUG_TLB)
1978d4c430a8SPaul Brook         printf("tlb_flush_page: forced full flush ("
1979d4c430a8SPaul Brook                TARGET_FMT_lx "/" TARGET_FMT_lx ")\n",
1980d4c430a8SPaul Brook                env->tlb_flush_addr, env->tlb_flush_mask);
1981d4c430a8SPaul Brook #endif
1982d4c430a8SPaul Brook         tlb_flush(env, 1);
1983d4c430a8SPaul Brook         return;
1984d4c430a8SPaul Brook     }
19850124311eSbellard     /* must reset current TB so that interrupts cannot modify the
19860124311eSbellard        links while we are modifying them */
19870124311eSbellard     env->current_tb = NULL;
198833417e70Sbellard 
198961382a50Sbellard     addr &= TARGET_PAGE_MASK;
199033417e70Sbellard     i = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
1991cfde4bd9SIsaku Yamahata     for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++)
1992cfde4bd9SIsaku Yamahata         tlb_flush_entry(&env->tlb_table[mmu_idx][i], addr);
19930124311eSbellard 
19945c751e99Sedgar_igl     tlb_flush_jmp_cache(env, addr);
19959fa3e853Sbellard }
19969fa3e853Sbellard 
19979fa3e853Sbellard /* update the TLBs so that writes to code in the virtual page 'addr'
19989fa3e853Sbellard    can be detected */
1999c227f099SAnthony Liguori static void tlb_protect_code(ram_addr_t ram_addr)
200061382a50Sbellard {
20016a00d601Sbellard     cpu_physical_memory_reset_dirty(ram_addr,
20026a00d601Sbellard                                     ram_addr + TARGET_PAGE_SIZE,
20036a00d601Sbellard                                     CODE_DIRTY_FLAG);
20049fa3e853Sbellard }
20059fa3e853Sbellard 
/* update the TLB so that writes in physical page 'phys_addr' are no longer
   tested for self modifying code */
static void tlb_unprotect_code_phys(CPUState *env, ram_addr_t ram_addr,
                                    target_ulong vaddr)
{
    /* Setting CODE_DIRTY_FLAG means the page no longer holds valid
       translated code, so writes need not be trapped any more.
       'env' and 'vaddr' are unused here; kept for interface symmetry. */
    cpu_physical_memory_set_dirty_flags(ram_addr, CODE_DIRTY_FLAG);
}
20139fa3e853Sbellard 
20141ccde1cbSbellard static inline void tlb_reset_dirty_range(CPUTLBEntry *tlb_entry,
20151ccde1cbSbellard                                          unsigned long start, unsigned long length)
20161ccde1cbSbellard {
20171ccde1cbSbellard     unsigned long addr;
201884b7b8e7Sbellard     if ((tlb_entry->addr_write & ~TARGET_PAGE_MASK) == IO_MEM_RAM) {
201984b7b8e7Sbellard         addr = (tlb_entry->addr_write & TARGET_PAGE_MASK) + tlb_entry->addend;
20201ccde1cbSbellard         if ((addr - start) < length) {
20210f459d16Spbrook             tlb_entry->addr_write = (tlb_entry->addr_write & TARGET_PAGE_MASK) | TLB_NOTDIRTY;
20221ccde1cbSbellard         }
20231ccde1cbSbellard     }
20241ccde1cbSbellard }
20251ccde1cbSbellard 
/* Note: start and end must be within the same ram block.  */
void cpu_physical_memory_reset_dirty(ram_addr_t start, ram_addr_t end,
                                     int dirty_flags)
{
    CPUState *env;
    unsigned long length, start1;
    int i;

    start &= TARGET_PAGE_MASK;
    end = TARGET_PAGE_ALIGN(end);

    length = end - start;
    if (length == 0)
        return;
    /* Clear the requested dirty bits in the global bitmap first. */
    cpu_physical_memory_mask_dirty_range(start, length, dirty_flags);

    /* we modify the TLB cache so that the dirty bit will be set again
       when accessing the range */
    start1 = (unsigned long)qemu_get_ram_ptr(start);
    /* Check that we don't span multiple blocks - this breaks the
       address comparisons below.  */
    if ((unsigned long)qemu_get_ram_ptr(end - 1) - start1
            != (end - 1) - start) {
        abort();
    }

    /* Re-arm dirty tracking in every CPU's TLB, all MMU modes. */
    for(env = first_cpu; env != NULL; env = env->next_cpu) {
        int mmu_idx;
        for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) {
            for(i = 0; i < CPU_TLB_SIZE; i++)
                tlb_reset_dirty_range(&env->tlb_table[mmu_idx][i],
                                      start1, length);
        }
    }
}
20611ccde1cbSbellard 
206274576198Saliguori int cpu_physical_memory_set_dirty_tracking(int enable)
206374576198Saliguori {
2064f6f3fbcaSMichael S. Tsirkin     int ret = 0;
206574576198Saliguori     in_migration = enable;
2066f6f3fbcaSMichael S. Tsirkin     ret = cpu_notify_migration_log(!!enable);
2067f6f3fbcaSMichael S. Tsirkin     return ret;
206874576198Saliguori }
206974576198Saliguori 
/* Return non-zero if dirty memory tracking is currently enabled
   (as set by cpu_physical_memory_set_dirty_tracking). */
int cpu_physical_memory_get_dirty_tracking(void)
{
    return in_migration;
}
207474576198Saliguori 
2075c227f099SAnthony Liguori int cpu_physical_sync_dirty_bitmap(target_phys_addr_t start_addr,
2076c227f099SAnthony Liguori                                    target_phys_addr_t end_addr)
20772bec46dcSaliguori {
20787b8f3b78SMichael S. Tsirkin     int ret;
2079151f7749SJan Kiszka 
2080f6f3fbcaSMichael S. Tsirkin     ret = cpu_notify_sync_dirty_bitmap(start_addr, end_addr);
2081151f7749SJan Kiszka     return ret;
20822bec46dcSaliguori }
20832bec46dcSaliguori 
20843a7d929eSbellard static inline void tlb_update_dirty(CPUTLBEntry *tlb_entry)
20853a7d929eSbellard {
2086c227f099SAnthony Liguori     ram_addr_t ram_addr;
20875579c7f3Spbrook     void *p;
20883a7d929eSbellard 
208984b7b8e7Sbellard     if ((tlb_entry->addr_write & ~TARGET_PAGE_MASK) == IO_MEM_RAM) {
20905579c7f3Spbrook         p = (void *)(unsigned long)((tlb_entry->addr_write & TARGET_PAGE_MASK)
20915579c7f3Spbrook             + tlb_entry->addend);
20925579c7f3Spbrook         ram_addr = qemu_ram_addr_from_host(p);
20933a7d929eSbellard         if (!cpu_physical_memory_is_dirty(ram_addr)) {
20940f459d16Spbrook             tlb_entry->addr_write |= TLB_NOTDIRTY;
20953a7d929eSbellard         }
20963a7d929eSbellard     }
20973a7d929eSbellard }
20983a7d929eSbellard 
20993a7d929eSbellard /* update the TLB according to the current state of the dirty bits */
21003a7d929eSbellard void cpu_tlb_update_dirty(CPUState *env)
21013a7d929eSbellard {
21023a7d929eSbellard     int i;
2103cfde4bd9SIsaku Yamahata     int mmu_idx;
2104cfde4bd9SIsaku Yamahata     for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) {
21053a7d929eSbellard         for(i = 0; i < CPU_TLB_SIZE; i++)
2106cfde4bd9SIsaku Yamahata             tlb_update_dirty(&env->tlb_table[mmu_idx][i]);
2107cfde4bd9SIsaku Yamahata     }
21083a7d929eSbellard }
21093a7d929eSbellard 
21100f459d16Spbrook static inline void tlb_set_dirty1(CPUTLBEntry *tlb_entry, target_ulong vaddr)
21111ccde1cbSbellard {
21120f459d16Spbrook     if (tlb_entry->addr_write == (vaddr | TLB_NOTDIRTY))
21130f459d16Spbrook         tlb_entry->addr_write = vaddr;
21141ccde1cbSbellard }
21151ccde1cbSbellard 
21160f459d16Spbrook /* update the TLB corresponding to virtual page vaddr
21170f459d16Spbrook    so that it is no longer dirty */
21180f459d16Spbrook static inline void tlb_set_dirty(CPUState *env, target_ulong vaddr)
21191ccde1cbSbellard {
21201ccde1cbSbellard     int i;
2121cfde4bd9SIsaku Yamahata     int mmu_idx;
21221ccde1cbSbellard 
21230f459d16Spbrook     vaddr &= TARGET_PAGE_MASK;
21241ccde1cbSbellard     i = (vaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
2125cfde4bd9SIsaku Yamahata     for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++)
2126cfde4bd9SIsaku Yamahata         tlb_set_dirty1(&env->tlb_table[mmu_idx][i], vaddr);
21271ccde1cbSbellard }
21281ccde1cbSbellard 
/* Our TLB does not support large pages, so remember the area covered by
   large pages and trigger a full TLB flush if these are invalidated.  */
static void tlb_add_large_page(CPUState *env, target_ulong vaddr,
                               target_ulong size)
{
    target_ulong mask = ~(size - 1);  /* alignment mask for this page size */

    if (env->tlb_flush_addr == (target_ulong)-1) {
        /* No large page tracked yet: record this one exactly. */
        env->tlb_flush_addr = vaddr & mask;
        env->tlb_flush_mask = mask;
        return;
    }
    /* Extend the existing region to include the new page.
       This is a compromise between unnecessary flushes and the cost
       of maintaining a full variable size TLB.  */
    mask &= env->tlb_flush_mask;
    while (((env->tlb_flush_addr ^ vaddr) & mask) != 0) {
        /* Widen the mask one bit at a time until both the tracked
           address and the new page fall inside a single region. */
        mask <<= 1;
    }
    env->tlb_flush_addr &= mask;
    env->tlb_flush_mask = mask;
}
2151d4c430a8SPaul Brook 
/* Add a new TLB entry. At most one entry for a given virtual address
   is permitted. Only a single TARGET_PAGE_SIZE region is mapped, the
   supplied size is only used by tlb_flush_page.  */
void tlb_set_page(CPUState *env, target_ulong vaddr,
                  target_phys_addr_t paddr, int prot,
                  int mmu_idx, target_ulong size)
{
    PhysPageDesc *p;
    unsigned long pd;        /* physical offset + memory-type low bits */
    unsigned int index;
    target_ulong address;    /* value stored in addr_read/addr_write */
    target_ulong code_address;
    unsigned long addend;    /* host - guest address delta for RAM */
    CPUTLBEntry *te;
    CPUWatchpoint *wp;
    target_phys_addr_t iotlb;

    assert(size >= TARGET_PAGE_SIZE);
    if (size != TARGET_PAGE_SIZE) {
        /* Remember large pages so tlb_flush_page can flush them fully. */
        tlb_add_large_page(env, vaddr, size);
    }
    p = phys_page_find(paddr >> TARGET_PAGE_BITS);
    if (!p) {
        pd = IO_MEM_UNASSIGNED;
    } else {
        pd = p->phys_offset;
    }
#if defined(DEBUG_TLB)
    printf("tlb_set_page: vaddr=" TARGET_FMT_lx " paddr=0x%08x prot=%x idx=%d smmu=%d pd=0x%08lx\n",
           vaddr, (int)paddr, prot, mmu_idx, is_softmmu, pd);
#endif

    address = vaddr;
    if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM && !(pd & IO_MEM_ROMD)) {
        /* IO memory case (romd handled later) */
        address |= TLB_MMIO;
    }
    addend = (unsigned long)qemu_get_ram_ptr(pd & TARGET_PAGE_MASK);
    if ((pd & ~TARGET_PAGE_MASK) <= IO_MEM_ROM) {
        /* Normal RAM.  */
        iotlb = pd & TARGET_PAGE_MASK;
        if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_RAM)
            iotlb |= IO_MEM_NOTDIRTY;
        else
            iotlb |= IO_MEM_ROM;
    } else {
        /* IO handlers are currently passed a physical address.
           It would be nice to pass an offset from the base address
           of that region.  This would avoid having to special case RAM,
           and avoid full address decoding in every device.
           We can't use the high bits of pd for this because
           IO_MEM_ROMD uses these as a ram address.  */
        iotlb = (pd & ~TARGET_PAGE_MASK);
        if (p) {
            iotlb += p->region_offset;
        } else {
            iotlb += paddr;
        }
    }

    code_address = address;
    /* Make accesses to pages with watchpoints go via the
       watchpoint trap routines.  */
    QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
        if (vaddr == (wp->vaddr & TARGET_PAGE_MASK)) {
            iotlb = io_mem_watch + paddr;
            /* TODO: The memory case can be optimized by not trapping
               reads of pages with a write breakpoint.  */
            address |= TLB_MMIO;
        }
    }

    index = (vaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
    env->iotlb[mmu_idx][index] = iotlb - vaddr;
    te = &env->tlb_table[mmu_idx][index];
    te->addend = addend - vaddr;
    /* Fill each access field only for the granted permissions; -1
       guarantees no guest address can match (see s_cputlb_empty_entry). */
    if (prot & PAGE_READ) {
        te->addr_read = address;
    } else {
        te->addr_read = -1;
    }

    if (prot & PAGE_EXEC) {
        te->addr_code = code_address;
    } else {
        te->addr_code = -1;
    }
    if (prot & PAGE_WRITE) {
        if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_ROM ||
            (pd & IO_MEM_ROMD)) {
            /* Write access calls the I/O callback.  */
            te->addr_write = address | TLB_MMIO;
        } else if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_RAM &&
                   !cpu_physical_memory_is_dirty(pd)) {
            /* Clean RAM page: trap the first write for dirty tracking. */
            te->addr_write = address | TLB_NOTDIRTY;
        } else {
            te->addr_write = address;
        }
    } else {
        te->addr_write = -1;
    }
}
22549fa3e853Sbellard 
22550124311eSbellard #else
22560124311eSbellard 
/* User-mode emulation has no softmmu TLB, so flushing is a no-op. */
void tlb_flush(CPUState *env, int flush_global)
{
}
22600124311eSbellard 
/* User-mode emulation has no softmmu TLB; nothing to flush per page. */
void tlb_flush_page(CPUState *env, target_ulong addr)
{
}
22640124311eSbellard 
/*
 * Walks guest process memory "regions" one by one
 * and calls callback function 'fn' for each region.
 */

/* Accumulator state for the region walk: tracks the currently open
   run of pages sharing the same protection flags. */
struct walk_memory_regions_data
{
    walk_memory_regions_fn fn;  /* callback invoked once per region */
    void *priv;                 /* opaque pointer passed through to fn */
    unsigned long start;        /* start of current region, -1ul if none open */
    int prot;                   /* protection flags of the current region */
};
22779fa3e853Sbellard 
22785cd2c5b6SRichard Henderson static int walk_memory_regions_end(struct walk_memory_regions_data *data,
2279b480d9b7SPaul Brook                                    abi_ulong end, int new_prot)
22805cd2c5b6SRichard Henderson {
22815cd2c5b6SRichard Henderson     if (data->start != -1ul) {
22825cd2c5b6SRichard Henderson         int rc = data->fn(data->priv, data->start, end, data->prot);
22835cd2c5b6SRichard Henderson         if (rc != 0) {
22845cd2c5b6SRichard Henderson             return rc;
22855cd2c5b6SRichard Henderson         }
22865cd2c5b6SRichard Henderson     }
2287edf8e2afSMika Westerberg 
22885cd2c5b6SRichard Henderson     data->start = (new_prot ? end : -1ul);
22895cd2c5b6SRichard Henderson     data->prot = new_prot;
22905cd2c5b6SRichard Henderson 
22915cd2c5b6SRichard Henderson     return 0;
229233417e70Sbellard }
22935cd2c5b6SRichard Henderson 
/* Recursively walk one node of the page-table radix tree, reporting
   contiguous runs of equally-protected pages via the data callback.
   'base' is the guest address covered by *lp; 'level' is the remaining
   depth (0 means *lp points at a leaf PageDesc array).
   Returns 0 on success, or the callback's non-zero status. */
static int walk_memory_regions_1(struct walk_memory_regions_data *data,
                                 abi_ulong base, int level, void **lp)
{
    abi_ulong pa;
    int i, rc;

    if (*lp == NULL) {
        /* Empty subtree: close any open region at this address. */
        return walk_memory_regions_end(data, base, 0);
    }

    if (level == 0) {
        PageDesc *pd = *lp;
        for (i = 0; i < L2_SIZE; ++i) {
            int prot = pd[i].flags;

            pa = base | (i << TARGET_PAGE_BITS);
            if (prot != data->prot) {
                /* Protection changed: emit the previous run here. */
                rc = walk_memory_regions_end(data, pa, prot);
                if (rc != 0) {
                    return rc;
                }
            }
        }
    } else {
        void **pp = *lp;
        for (i = 0; i < L2_SIZE; ++i) {
            /* Address covered by child i at this level. */
            pa = base | ((abi_ulong)i <<
                (TARGET_PAGE_BITS + L2_BITS * level));
            rc = walk_memory_regions_1(data, pa, level - 1, pp + i);
            if (rc != 0) {
                return rc;
            }
        }
    }

    return 0;
}
23315cd2c5b6SRichard Henderson 
23325cd2c5b6SRichard Henderson int walk_memory_regions(void *priv, walk_memory_regions_fn fn)
23335cd2c5b6SRichard Henderson {
23345cd2c5b6SRichard Henderson     struct walk_memory_regions_data data;
23355cd2c5b6SRichard Henderson     unsigned long i;
23365cd2c5b6SRichard Henderson 
23375cd2c5b6SRichard Henderson     data.fn = fn;
23385cd2c5b6SRichard Henderson     data.priv = priv;
23395cd2c5b6SRichard Henderson     data.start = -1ul;
23405cd2c5b6SRichard Henderson     data.prot = 0;
23415cd2c5b6SRichard Henderson 
23425cd2c5b6SRichard Henderson     for (i = 0; i < V_L1_SIZE; i++) {
2343b480d9b7SPaul Brook         int rc = walk_memory_regions_1(&data, (abi_ulong)i << V_L1_SHIFT,
23445cd2c5b6SRichard Henderson                                        V_L1_SHIFT / L2_BITS - 1, l1_map + i);
23455cd2c5b6SRichard Henderson         if (rc != 0) {
23465cd2c5b6SRichard Henderson             return rc;
23475cd2c5b6SRichard Henderson         }
23485cd2c5b6SRichard Henderson     }
23495cd2c5b6SRichard Henderson 
23505cd2c5b6SRichard Henderson     return walk_memory_regions_end(&data, 0, 0);
2351edf8e2afSMika Westerberg }
2352edf8e2afSMika Westerberg 
2353b480d9b7SPaul Brook static int dump_region(void *priv, abi_ulong start,
2354b480d9b7SPaul Brook     abi_ulong end, unsigned long prot)
2355edf8e2afSMika Westerberg {
2356edf8e2afSMika Westerberg     FILE *f = (FILE *)priv;
2357edf8e2afSMika Westerberg 
2358b480d9b7SPaul Brook     (void) fprintf(f, TARGET_ABI_FMT_lx"-"TARGET_ABI_FMT_lx
2359b480d9b7SPaul Brook         " "TARGET_ABI_FMT_lx" %c%c%c\n",
2360edf8e2afSMika Westerberg         start, end, end - start,
2361edf8e2afSMika Westerberg         ((prot & PAGE_READ) ? 'r' : '-'),
2362edf8e2afSMika Westerberg         ((prot & PAGE_WRITE) ? 'w' : '-'),
2363edf8e2afSMika Westerberg         ((prot & PAGE_EXEC) ? 'x' : '-'));
2364edf8e2afSMika Westerberg 
2365edf8e2afSMika Westerberg     return (0);
2366edf8e2afSMika Westerberg }
2367edf8e2afSMika Westerberg 
/* dump memory mappings */
void page_dump(FILE *f)
{
    /* Header line, then one line per mapped region via dump_region(). */
    (void) fprintf(f, "%-8s %-8s %-8s %s\n",
            "start", "end", "size", "prot");
    walk_memory_regions(f, dump_region);
}
23759fa3e853Sbellard 
237653a5960aSpbrook int page_get_flags(target_ulong address)
23779fa3e853Sbellard {
23789fa3e853Sbellard     PageDesc *p;
23799fa3e853Sbellard 
23809fa3e853Sbellard     p = page_find(address >> TARGET_PAGE_BITS);
23819fa3e853Sbellard     if (!p)
23829fa3e853Sbellard         return 0;
23839fa3e853Sbellard     return p->flags;
23849fa3e853Sbellard }
23859fa3e853Sbellard 
/* Modify the flags of a page and invalidate the code if necessary.
   The flag PAGE_WRITE_ORG is positioned automatically depending
   on PAGE_WRITE.  The mmap_lock should already be held.  */
void page_set_flags(target_ulong start, target_ulong end, int flags)
{
    target_ulong addr, len;

    /* This function should never be called with addresses outside the
       guest address space.  If this assert fires, it probably indicates
       a missing call to h2g_valid.  */
#if TARGET_ABI_BITS > L1_MAP_ADDR_SPACE_BITS
    assert(end < ((abi_ulong)1 << L1_MAP_ADDR_SPACE_BITS));
#endif
    assert(start < end);

    start = start & TARGET_PAGE_MASK;
    end = TARGET_PAGE_ALIGN(end);

    if (flags & PAGE_WRITE) {
        /* Record that the page was originally writable, so that
           page_unprotect() can later restore write access after the
           page was write-protected for holding translated code. */
        flags |= PAGE_WRITE_ORG;
    }

    for (addr = start, len = end - start;
         len != 0;
         len -= TARGET_PAGE_SIZE, addr += TARGET_PAGE_SIZE) {
        PageDesc *p = page_find_alloc(addr >> TARGET_PAGE_BITS, 1);

        /* If the write protection bit is set, then we invalidate
           the code inside.  */
        if (!(p->flags & PAGE_WRITE) &&
            (flags & PAGE_WRITE) &&
            p->first_tb) {
            tb_invalidate_phys_page(addr, 0, NULL);
        }
        p->flags = flags;
    }
}
24239fa3e853Sbellard 
24243d97b40bSths int page_check_range(target_ulong start, target_ulong len, int flags)
24253d97b40bSths {
24263d97b40bSths     PageDesc *p;
24273d97b40bSths     target_ulong end;
24283d97b40bSths     target_ulong addr;
24293d97b40bSths 
2430376a7909SRichard Henderson     /* This function should never be called with addresses outside the
2431376a7909SRichard Henderson        guest address space.  If this assert fires, it probably indicates
2432376a7909SRichard Henderson        a missing call to h2g_valid.  */
2433338e9e6cSBlue Swirl #if TARGET_ABI_BITS > L1_MAP_ADDR_SPACE_BITS
2434338e9e6cSBlue Swirl     assert(start < ((abi_ulong)1 << L1_MAP_ADDR_SPACE_BITS));
2435376a7909SRichard Henderson #endif
2436376a7909SRichard Henderson 
24373e0650a9SRichard Henderson     if (len == 0) {
24383e0650a9SRichard Henderson         return 0;
24393e0650a9SRichard Henderson     }
2440376a7909SRichard Henderson     if (start + len - 1 < start) {
2441376a7909SRichard Henderson         /* We've wrapped around.  */
244255f280c9Sbalrog         return -1;
2443376a7909SRichard Henderson     }
244455f280c9Sbalrog 
24453d97b40bSths     end = TARGET_PAGE_ALIGN(start+len); /* must do before we loose bits in the next step */
24463d97b40bSths     start = start & TARGET_PAGE_MASK;
24473d97b40bSths 
2448376a7909SRichard Henderson     for (addr = start, len = end - start;
2449376a7909SRichard Henderson          len != 0;
2450376a7909SRichard Henderson          len -= TARGET_PAGE_SIZE, addr += TARGET_PAGE_SIZE) {
24513d97b40bSths         p = page_find(addr >> TARGET_PAGE_BITS);
24523d97b40bSths         if( !p )
24533d97b40bSths             return -1;
24543d97b40bSths         if( !(p->flags & PAGE_VALID) )
24553d97b40bSths             return -1;
24563d97b40bSths 
2457dae3270cSbellard         if ((flags & PAGE_READ) && !(p->flags & PAGE_READ))
24583d97b40bSths             return -1;
2459dae3270cSbellard         if (flags & PAGE_WRITE) {
2460dae3270cSbellard             if (!(p->flags & PAGE_WRITE_ORG))
24613d97b40bSths                 return -1;
2462dae3270cSbellard             /* unprotect the page if it was put read-only because it
2463dae3270cSbellard                contains translated code */
2464dae3270cSbellard             if (!(p->flags & PAGE_WRITE)) {
2465dae3270cSbellard                 if (!page_unprotect(addr, 0, NULL))
2466dae3270cSbellard                     return -1;
2467dae3270cSbellard             }
2468dae3270cSbellard             return 0;
2469dae3270cSbellard         }
24703d97b40bSths     }
24713d97b40bSths     return 0;
24723d97b40bSths }
24733d97b40bSths 
/* called from signal handler: invalidate the code and unprotect the
   page. Return TRUE if the fault was successfully handled. */
int page_unprotect(target_ulong address, unsigned long pc, void *puc)
{
    unsigned int prot;
    PageDesc *p;
    target_ulong host_start, host_end, addr;

    /* Technically this isn't safe inside a signal handler.  However we
       know this only ever happens in a synchronous SEGV handler, so in
       practice it seems to be ok.  */
    mmap_lock();

    p = page_find(address >> TARGET_PAGE_BITS);
    if (!p) {
        mmap_unlock();
        return 0;
    }

    /* if the page was really writable, then we change its
       protection back to writable */
    if ((p->flags & PAGE_WRITE_ORG) && !(p->flags & PAGE_WRITE)) {
        /* A host page may span several target pages; restore write
           access to all of them and accumulate their flags for the
           final mprotect() below. */
        host_start = address & qemu_host_page_mask;
        host_end = host_start + qemu_host_page_size;

        prot = 0;
        for (addr = host_start ; addr < host_end ; addr += TARGET_PAGE_SIZE) {
            p = page_find(addr >> TARGET_PAGE_BITS);
            p->flags |= PAGE_WRITE;
            prot |= p->flags;

            /* and since the content will be modified, we must invalidate
               the corresponding translated code. */
            tb_invalidate_phys_page(addr, pc, puc);
#ifdef DEBUG_TB_CHECK
            tb_invalidate_check(addr);
#endif
        }
        mprotect((void *)g2h(host_start), qemu_host_page_size,
                 prot & PAGE_BITS);

        mmap_unlock();
        return 1;
    }
    mmap_unlock();
    return 0;
}
25219fa3e853Sbellard 
/* No-op stub for user-mode emulation: there is no softmmu TLB, so there
   is no per-CPU dirty-entry state to update. */
static inline void tlb_set_dirty(CPUState *env,
                                 unsigned long addr, target_ulong vaddr)
{
}
25269fa3e853Sbellard #endif /* defined(CONFIG_USER_ONLY) */
252733417e70Sbellard 
2528e2eef170Spbrook #if !defined(CONFIG_USER_ONLY)
25298da3ff18Spbrook 
/* Byte offset of 'addr' within its target page, used to index the
   per-byte tables below. */
#define SUBPAGE_IDX(addr) ((addr) & ~TARGET_PAGE_MASK)
/* A subpage splits a single target page into byte ranges that map to
   different I/O handlers, for memory regions that are not page-aligned. */
typedef struct subpage_t {
    target_phys_addr_t base;                     /* guest-physical base of the page */
    ram_addr_t sub_io_index[TARGET_PAGE_SIZE];   /* per-byte I/O handler index */
    ram_addr_t region_offset[TARGET_PAGE_SIZE];  /* per-byte region offset */
} subpage_t;
2536c04b2b78SPaul Brook 
2537c227f099SAnthony Liguori static int subpage_register (subpage_t *mmio, uint32_t start, uint32_t end,
2538c227f099SAnthony Liguori                              ram_addr_t memory, ram_addr_t region_offset);
2539f6405247SRichard Henderson static subpage_t *subpage_init (target_phys_addr_t base, ram_addr_t *phys,
2540f6405247SRichard Henderson                                 ram_addr_t orig_memory,
2541f6405247SRichard Henderson                                 ram_addr_t region_offset);
/* For the page containing 'addr' within the registration range
   [start_addr, start_addr + orig_size), compute the first and last byte
   offsets of the range inside that page (start_addr2, end_addr2) and set
   need_subpage when the range covers the page only partially. */
#define CHECK_SUBPAGE(addr, start_addr, start_addr2, end_addr, end_addr2, \
                      need_subpage)                                     \
    do {                                                                \
        if (addr > start_addr)                                          \
            start_addr2 = 0;                                            \
        else {                                                          \
            start_addr2 = start_addr & ~TARGET_PAGE_MASK;               \
            if (start_addr2 > 0)                                        \
                need_subpage = 1;                                       \
        }                                                               \
                                                                        \
        if ((start_addr + orig_size) - addr >= TARGET_PAGE_SIZE)        \
            end_addr2 = TARGET_PAGE_SIZE - 1;                           \
        else {                                                          \
            end_addr2 = (start_addr + orig_size - 1) & ~TARGET_PAGE_MASK; \
            if (end_addr2 < TARGET_PAGE_SIZE - 1)                       \
                need_subpage = 1;                                       \
        }                                                               \
    } while (0)
2561db7b5426Sblueswir1 
/* register physical memory.
   For RAM, 'size' must be a multiple of the target page size.
   If (phys_offset & ~TARGET_PAGE_MASK) != 0, then it is an
   io memory page.  The address used when calling the IO function is
   the offset from the start of the region, plus region_offset.  Both
   start_addr and region_offset are rounded down to a page boundary
   before calculating this offset.  This should not be a problem unless
   the low bits of start_addr and region_offset differ.  */
void cpu_register_physical_memory_offset(target_phys_addr_t start_addr,
                                         ram_addr_t size,
                                         ram_addr_t phys_offset,
                                         ram_addr_t region_offset)
{
    target_phys_addr_t addr, end_addr;
    PhysPageDesc *p;
    CPUState *env;
    ram_addr_t orig_size = size;
    subpage_t *subpage;

    /* Let registered listeners (e.g. KVM) see the mapping change first. */
    cpu_notify_set_memory(start_addr, size, phys_offset);

    if (phys_offset == IO_MEM_UNASSIGNED) {
        region_offset = start_addr;
    }
    region_offset &= TARGET_PAGE_MASK;
    /* Round the size up to whole target pages. */
    size = (size + TARGET_PAGE_SIZE - 1) & TARGET_PAGE_MASK;
    end_addr = start_addr + (target_phys_addr_t)size;
    for(addr = start_addr; addr != end_addr; addr += TARGET_PAGE_SIZE) {
        p = phys_page_find(addr >> TARGET_PAGE_BITS);
        if (p && p->phys_offset != IO_MEM_UNASSIGNED) {
            /* Page already mapped: may need to split into a subpage when
               the new range only partially covers it. */
            ram_addr_t orig_memory = p->phys_offset;
            target_phys_addr_t start_addr2, end_addr2;
            int need_subpage = 0;

            CHECK_SUBPAGE(addr, start_addr, start_addr2, end_addr, end_addr2,
                          need_subpage);
            if (need_subpage) {
                if (!(orig_memory & IO_MEM_SUBPAGE)) {
                    subpage = subpage_init((addr & TARGET_PAGE_MASK),
                                           &p->phys_offset, orig_memory,
                                           p->region_offset);
                } else {
                    /* Page is already a subpage: reuse its container. */
                    subpage = io_mem_opaque[(orig_memory & ~TARGET_PAGE_MASK)
                                            >> IO_MEM_SHIFT];
                }
                subpage_register(subpage, start_addr2, end_addr2, phys_offset,
                                 region_offset);
                p->region_offset = 0;
            } else {
                p->phys_offset = phys_offset;
                /* For RAM/ROM(D), advance the backing offset so each guest
                   page maps a distinct backing page. */
                if ((phys_offset & ~TARGET_PAGE_MASK) <= IO_MEM_ROM ||
                    (phys_offset & IO_MEM_ROMD))
                    phys_offset += TARGET_PAGE_SIZE;
            }
        } else {
            /* No descriptor yet: allocate one. */
            p = phys_page_find_alloc(addr >> TARGET_PAGE_BITS, 1);
            p->phys_offset = phys_offset;
            p->region_offset = region_offset;
            if ((phys_offset & ~TARGET_PAGE_MASK) <= IO_MEM_ROM ||
                (phys_offset & IO_MEM_ROMD)) {
                phys_offset += TARGET_PAGE_SIZE;
            } else {
                target_phys_addr_t start_addr2, end_addr2;
                int need_subpage = 0;

                CHECK_SUBPAGE(addr, start_addr, start_addr2, end_addr,
                              end_addr2, need_subpage);

                if (need_subpage) {
                    subpage = subpage_init((addr & TARGET_PAGE_MASK),
                                           &p->phys_offset, IO_MEM_UNASSIGNED,
                                           addr & TARGET_PAGE_MASK);
                    subpage_register(subpage, start_addr2, end_addr2,
                                     phys_offset, region_offset);
                    p->region_offset = 0;
                }
            }
        }
        region_offset += TARGET_PAGE_SIZE;
    }

    /* since each CPU stores ram addresses in its TLB cache, we must
       reset the modified entries */
    /* XXX: slow ! */
    for(env = first_cpu; env != NULL; env = env->next_cpu) {
        tlb_flush(env, 1);
    }
}
265033417e70Sbellard 
2651ba863458Sbellard /* XXX: temporary until new memory mapping API */
2652c227f099SAnthony Liguori ram_addr_t cpu_get_physical_page_desc(target_phys_addr_t addr)
2653ba863458Sbellard {
2654ba863458Sbellard     PhysPageDesc *p;
2655ba863458Sbellard 
2656ba863458Sbellard     p = phys_page_find(addr >> TARGET_PAGE_BITS);
2657ba863458Sbellard     if (!p)
2658ba863458Sbellard         return IO_MEM_UNASSIGNED;
2659ba863458Sbellard     return p->phys_offset;
2660ba863458Sbellard }
2661ba863458Sbellard 
2662c227f099SAnthony Liguori void qemu_register_coalesced_mmio(target_phys_addr_t addr, ram_addr_t size)
2663f65ed4c1Saliguori {
2664f65ed4c1Saliguori     if (kvm_enabled())
2665f65ed4c1Saliguori         kvm_coalesce_mmio_region(addr, size);
2666f65ed4c1Saliguori }
2667f65ed4c1Saliguori 
2668c227f099SAnthony Liguori void qemu_unregister_coalesced_mmio(target_phys_addr_t addr, ram_addr_t size)
2669f65ed4c1Saliguori {
2670f65ed4c1Saliguori     if (kvm_enabled())
2671f65ed4c1Saliguori         kvm_uncoalesce_mmio_region(addr, size);
2672f65ed4c1Saliguori }
2673f65ed4c1Saliguori 
/* Drain any MMIO writes the accelerator has buffered so far.
   No-op unless KVM is in use. */
void qemu_flush_coalesced_mmio_buffer(void)
{
    if (!kvm_enabled()) {
        return;
    }
    kvm_flush_coalesced_mmio_buffer();
}
267962a2744cSSheng Yang 
2680c902760fSMarcelo Tosatti #if defined(__linux__) && !defined(TARGET_S390X)
2681c902760fSMarcelo Tosatti 
2682c902760fSMarcelo Tosatti #include <sys/vfs.h>
2683c902760fSMarcelo Tosatti 
2684c902760fSMarcelo Tosatti #define HUGETLBFS_MAGIC       0x958458f6
2685c902760fSMarcelo Tosatti 
2686c902760fSMarcelo Tosatti static long gethugepagesize(const char *path)
2687c902760fSMarcelo Tosatti {
2688c902760fSMarcelo Tosatti     struct statfs fs;
2689c902760fSMarcelo Tosatti     int ret;
2690c902760fSMarcelo Tosatti 
2691c902760fSMarcelo Tosatti     do {
2692c902760fSMarcelo Tosatti 	    ret = statfs(path, &fs);
2693c902760fSMarcelo Tosatti     } while (ret != 0 && errno == EINTR);
2694c902760fSMarcelo Tosatti 
2695c902760fSMarcelo Tosatti     if (ret != 0) {
26966adc0549SMichael Tokarev 	    perror(path);
2697c902760fSMarcelo Tosatti 	    return 0;
2698c902760fSMarcelo Tosatti     }
2699c902760fSMarcelo Tosatti 
2700c902760fSMarcelo Tosatti     if (fs.f_type != HUGETLBFS_MAGIC)
2701c902760fSMarcelo Tosatti 	    fprintf(stderr, "Warning: path not on HugeTLBFS: %s\n", path);
2702c902760fSMarcelo Tosatti 
2703c902760fSMarcelo Tosatti     return fs.f_bsize;
2704c902760fSMarcelo Tosatti }
2705c902760fSMarcelo Tosatti 
/* Allocate 'memory' bytes of guest RAM backed by an unlinked temporary
   file created under 'path' (intended to be a hugetlbfs mount).
   Returns the mapped host address, or NULL on any failure so the caller
   can fall back to anonymous memory. */
static void *file_ram_alloc(ram_addr_t memory, const char *path)
{
    char *filename;
    void *area;
    int fd;
#ifdef MAP_POPULATE
    int flags;
#endif
    unsigned long hpagesize;

    hpagesize = gethugepagesize(path);
    if (!hpagesize) {
	return NULL;
    }

    /* Refuse allocations smaller than one huge page. */
    if (memory < hpagesize) {
        return NULL;
    }

    if (kvm_enabled() && !kvm_has_sync_mmu()) {
        fprintf(stderr, "host lacks kvm mmu notifiers, -mem-path unsupported\n");
        return NULL;
    }

    if (asprintf(&filename, "%s/qemu_back_mem.XXXXXX", path) == -1) {
	return NULL;
    }

    fd = mkstemp(filename);
    if (fd < 0) {
	perror("unable to create backing store for hugepages");
	free(filename);
	return NULL;
    }
    /* The mapping keeps the file's blocks alive; drop the name now so
       the file vanishes automatically when QEMU exits. */
    unlink(filename);
    free(filename);

    /* Round the size up to a whole number of huge pages. */
    memory = (memory+hpagesize-1) & ~(hpagesize-1);

    /*
     * ftruncate is not supported by hugetlbfs in older
     * hosts, so don't bother bailing out on errors.
     * If anything goes wrong with it under other filesystems,
     * mmap will fail.
     */
    if (ftruncate(fd, memory))
	perror("ftruncate");

#ifdef MAP_POPULATE
    /* NB: MAP_POPULATE won't exhaustively alloc all phys pages in the case
     * MAP_PRIVATE is requested.  For mem_prealloc we mmap as MAP_SHARED
     * to sidestep this quirk.
     */
    flags = mem_prealloc ? MAP_POPULATE | MAP_SHARED : MAP_PRIVATE;
    area = mmap(0, memory, PROT_READ | PROT_WRITE, flags, fd, 0);
#else
    area = mmap(0, memory, PROT_READ | PROT_WRITE, MAP_PRIVATE, fd, 0);
#endif
    if (area == MAP_FAILED) {
	perror("file_ram_alloc: can't mmap RAM pages");
	close(fd);
	return (NULL);
    }
    /* NOTE(review): 'fd' is left open on success -- one descriptor per
       allocation.  Confirm this is intentional and not a leak. */
    return area;
}
2771c902760fSMarcelo Tosatti #endif
2772c902760fSMarcelo Tosatti 
/* Allocate 'size' bytes of guest RAM, register the block in the global
   RAM block list and the dirty bitmap, and return its ram_addr_t offset.
   Honors -mem-path (hugepage file backing) on Linux when set. */
ram_addr_t qemu_ram_alloc(ram_addr_t size)
{
    RAMBlock *new_block;

    size = TARGET_PAGE_ALIGN(size);
    new_block = qemu_malloc(sizeof(*new_block));

    if (mem_path) {
#if defined (__linux__) && !defined(TARGET_S390X)
        new_block->host = file_ram_alloc(size, mem_path);
        if (!new_block->host) {
            /* File backing failed: fall back to anonymous memory. */
            new_block->host = qemu_vmalloc(size);
#ifdef MADV_MERGEABLE
            madvise(new_block->host, size, MADV_MERGEABLE);
#endif
        }
#else
        fprintf(stderr, "-mem-path option unsupported\n");
        exit(1);
#endif
    } else {
#if defined(TARGET_S390X) && defined(CONFIG_KVM)
        /* XXX S390 KVM requires the topmost vma of the RAM to be < 256GB */
        new_block->host = mmap((void*)0x1000000, size,
                                PROT_EXEC|PROT_READ|PROT_WRITE,
                                MAP_SHARED | MAP_ANONYMOUS, -1, 0);
#else
        new_block->host = qemu_vmalloc(size);
#endif
#ifdef MADV_MERGEABLE
        /* Allow KSM to deduplicate identical guest pages. */
        madvise(new_block->host, size, MADV_MERGEABLE);
#endif
    }
    new_block->offset = last_ram_offset;
    new_block->length = size;

    /* Push the block onto the head of the global list. */
    new_block->next = ram_blocks;
    ram_blocks = new_block;

    /* Grow the dirty bitmap (one flag byte per target page) and mark
       the new range fully dirty. */
    phys_ram_dirty = qemu_realloc(phys_ram_dirty,
        (last_ram_offset + size) >> TARGET_PAGE_BITS);
    memset(phys_ram_dirty + (last_ram_offset >> TARGET_PAGE_BITS),
           0xff, size >> TARGET_PAGE_BITS);

    last_ram_offset += size;

    if (kvm_enabled())
        kvm_setup_guest_memory(new_block->host, size);

    return new_block->offset;
}
2824e9a1ab19Sbellard 
/* Release guest RAM previously allocated with qemu_ram_alloc().
   Currently unimplemented: the block stays in the list and its host
   memory is never returned. */
void qemu_ram_free(ram_addr_t addr)
{
    /* TODO: implement this.  */
}
2829e9a1ab19Sbellard 
/* Return a host pointer to ram allocated with qemu_ram_alloc.
   With the exception of the softmmu code in this file, this should
   only be used for local memory (e.g. video ram) that the device owns,
   and knows it isn't going to access beyond the end of the block.

   It should not be used for general purpose DMA.
   Use cpu_physical_memory_map/cpu_physical_memory_rw instead.
 */
void *qemu_get_ram_ptr(ram_addr_t addr)
{
    RAMBlock *prev;
    RAMBlock **prevp;
    RAMBlock *block;

    /* Linear scan of the global block list for the block containing
       'addr'.  'prev' trails one node behind 'block'; 'prevp' holds the
       address of the link that points at 'prev'. */
    prev = NULL;
    prevp = &ram_blocks;
    block = ram_blocks;
    while (block && (block->offset > addr
                     || block->offset + block->length <= addr)) {
        if (prev)
          prevp = &prev->next;
        prev = block;
        block = block->next;
    }
    if (!block) {
        fprintf(stderr, "Bad ram offset %" PRIx64 "\n", (uint64_t)addr);
        abort();
    }
    /* Move this entry to start of the list.  */
    /* NOTE(review): because 'prevp' lags one node behind 'prev', this
       actually swaps the found block with its predecessor (reaching the
       head only when the block was second), i.e. a gradual promote-
       toward-front heuristic rather than a true move-to-front.  Confirm
       whether that is intended; the returned pointer is correct either
       way. */
    if (prev) {
        prev->next = block->next;
        block->next = *prevp;
        *prevp = block;
    }
    return block->host + (addr - block->offset);
}
2866dc828ca1Spbrook 
28675579c7f3Spbrook /* Some of the softmmu routines need to translate from a host pointer
28685579c7f3Spbrook    (typically a TLB entry) back to a ram offset.  */
2869c227f099SAnthony Liguori ram_addr_t qemu_ram_addr_from_host(void *ptr)
28705579c7f3Spbrook {
287194a6b54fSpbrook     RAMBlock *block;
287294a6b54fSpbrook     uint8_t *host = ptr;
287394a6b54fSpbrook 
287494a6b54fSpbrook     block = ram_blocks;
287594a6b54fSpbrook     while (block && (block->host > host
287694a6b54fSpbrook                      || block->host + block->length <= host)) {
287794a6b54fSpbrook         block = block->next;
287894a6b54fSpbrook     }
287994a6b54fSpbrook     if (!block) {
288094a6b54fSpbrook         fprintf(stderr, "Bad ram pointer %p\n", ptr);
288194a6b54fSpbrook         abort();
288294a6b54fSpbrook     }
288394a6b54fSpbrook     return block->offset + (host - block->host);
28845579c7f3Spbrook }
28855579c7f3Spbrook 
/* Byte read from unassigned (unmapped) physical memory: optionally logs,
   raises a target-specific fault on SPARC/MicroBlaze, reads as 0 elsewhere. */
static uint32_t unassigned_mem_readb(void *opaque, target_phys_addr_t addr)
{
#ifdef DEBUG_UNASSIGNED
    printf("Unassigned mem read " TARGET_FMT_plx "\n", addr);
#endif
#if defined(TARGET_SPARC) || defined(TARGET_MICROBLAZE)
    do_unassigned_access(addr, 0, 0, 0, 1);
#endif
    return 0;
}
2896e18231a3Sblueswir1 
/* 16-bit read from unassigned physical memory; see unassigned_mem_readb. */
static uint32_t unassigned_mem_readw(void *opaque, target_phys_addr_t addr)
{
#ifdef DEBUG_UNASSIGNED
    printf("Unassigned mem read " TARGET_FMT_plx "\n", addr);
#endif
#if defined(TARGET_SPARC) || defined(TARGET_MICROBLAZE)
    do_unassigned_access(addr, 0, 0, 0, 2);
#endif
    return 0;
}
2907e18231a3Sblueswir1 
/* 32-bit read from unassigned physical memory; see unassigned_mem_readb. */
static uint32_t unassigned_mem_readl(void *opaque, target_phys_addr_t addr)
{
#ifdef DEBUG_UNASSIGNED
    printf("Unassigned mem read " TARGET_FMT_plx "\n", addr);
#endif
#if defined(TARGET_SPARC) || defined(TARGET_MICROBLAZE)
    do_unassigned_access(addr, 0, 0, 0, 4);
#endif
    return 0;
}
291833417e70Sbellard 
/* Byte write to unassigned physical memory: optionally logs, raises a
   target-specific fault on SPARC/MicroBlaze, otherwise discarded. */
static void unassigned_mem_writeb(void *opaque, target_phys_addr_t addr, uint32_t val)
{
#ifdef DEBUG_UNASSIGNED
    printf("Unassigned mem write " TARGET_FMT_plx " = 0x%x\n", addr, val);
#endif
#if defined(TARGET_SPARC) || defined(TARGET_MICROBLAZE)
    do_unassigned_access(addr, 1, 0, 0, 1);
#endif
}
2928e18231a3Sblueswir1 
/* 16-bit write to unassigned physical memory; see unassigned_mem_writeb. */
static void unassigned_mem_writew(void *opaque, target_phys_addr_t addr, uint32_t val)
{
#ifdef DEBUG_UNASSIGNED
    printf("Unassigned mem write " TARGET_FMT_plx " = 0x%x\n", addr, val);
#endif
#if defined(TARGET_SPARC) || defined(TARGET_MICROBLAZE)
    do_unassigned_access(addr, 1, 0, 0, 2);
#endif
}
2938e18231a3Sblueswir1 
/* 32-bit write to unassigned physical memory; see unassigned_mem_writeb. */
static void unassigned_mem_writel(void *opaque, target_phys_addr_t addr, uint32_t val)
{
#ifdef DEBUG_UNASSIGNED
    printf("Unassigned mem write " TARGET_FMT_plx " = 0x%x\n", addr, val);
#endif
#if defined(TARGET_SPARC) || defined(TARGET_MICROBLAZE)
    do_unassigned_access(addr, 1, 0, 0, 4);
#endif
}
294833417e70Sbellard 
/* Dispatch table (byte/word/long) for reads from unassigned memory. */
static CPUReadMemoryFunc * const unassigned_mem_read[3] = {
    unassigned_mem_readb,
    unassigned_mem_readw,
    unassigned_mem_readl,
};
295433417e70Sbellard 
/* Dispatch table (byte/word/long) for writes to unassigned memory. */
static CPUWriteMemoryFunc * const unassigned_mem_write[3] = {
    unassigned_mem_writeb,
    unassigned_mem_writew,
    unassigned_mem_writel,
};
296033417e70Sbellard 
/* Byte write handler for RAM pages that contain translated code.
   Invalidates any TBs overlapping the written byte, performs the store,
   updates the dirty flags, and re-enables fast (non-intercepted) writes
   once no translated code remains on the page. */
static void notdirty_mem_writeb(void *opaque, target_phys_addr_t ram_addr,
                                uint32_t val)
{
    int dirty_flags;
    dirty_flags = cpu_physical_memory_get_dirty_flags(ram_addr);
    if (!(dirty_flags & CODE_DIRTY_FLAG)) {
        /* Page still holds translated code: invalidate the affected TBs
           before modifying the bytes they were translated from. */
#if !defined(CONFIG_USER_ONLY)
        tb_invalidate_phys_page_fast(ram_addr, 1);
        dirty_flags = cpu_physical_memory_get_dirty_flags(ram_addr);
#endif
    }
    stb_p(qemu_get_ram_ptr(ram_addr), val);
    dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
    cpu_physical_memory_set_dirty_flags(ram_addr, dirty_flags);
    /* we remove the notdirty callback only if the code has been
       flushed */
    if (dirty_flags == 0xff)
        tlb_set_dirty(cpu_single_env, cpu_single_env->mem_io_vaddr);
}
29801ccde1cbSbellard 
/* 16-bit variant of notdirty_mem_writeb: invalidate overlapping TBs,
   store, update dirty flags, re-enable fast writes when code is gone. */
static void notdirty_mem_writew(void *opaque, target_phys_addr_t ram_addr,
                                uint32_t val)
{
    int dirty_flags;
    dirty_flags = cpu_physical_memory_get_dirty_flags(ram_addr);
    if (!(dirty_flags & CODE_DIRTY_FLAG)) {
#if !defined(CONFIG_USER_ONLY)
        tb_invalidate_phys_page_fast(ram_addr, 2);
        dirty_flags = cpu_physical_memory_get_dirty_flags(ram_addr);
#endif
    }
    stw_p(qemu_get_ram_ptr(ram_addr), val);
    dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
    cpu_physical_memory_set_dirty_flags(ram_addr, dirty_flags);
    /* we remove the notdirty callback only if the code has been
       flushed */
    if (dirty_flags == 0xff)
        tlb_set_dirty(cpu_single_env, cpu_single_env->mem_io_vaddr);
}
30001ccde1cbSbellard 
/* 32-bit variant of notdirty_mem_writeb: invalidate overlapping TBs,
   store, update dirty flags, re-enable fast writes when code is gone. */
static void notdirty_mem_writel(void *opaque, target_phys_addr_t ram_addr,
                                uint32_t val)
{
    int dirty_flags;
    dirty_flags = cpu_physical_memory_get_dirty_flags(ram_addr);
    if (!(dirty_flags & CODE_DIRTY_FLAG)) {
#if !defined(CONFIG_USER_ONLY)
        tb_invalidate_phys_page_fast(ram_addr, 4);
        dirty_flags = cpu_physical_memory_get_dirty_flags(ram_addr);
#endif
    }
    stl_p(qemu_get_ram_ptr(ram_addr), val);
    dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
    cpu_physical_memory_set_dirty_flags(ram_addr, dirty_flags);
    /* we remove the notdirty callback only if the code has been
       flushed */
    if (dirty_flags == 0xff)
        tlb_set_dirty(cpu_single_env, cpu_single_env->mem_io_vaddr);
}
30201ccde1cbSbellard 
/* Placeholder read table for I/O slots that must never be read from
   (e.g. the notdirty handler, which only intercepts writes). */
static CPUReadMemoryFunc * const error_mem_read[3] = {
    NULL, /* never used */
    NULL, /* never used */
    NULL, /* never used */
};
30263a7d929eSbellard 
/* Dispatch table (byte/word/long) for writes to not-dirty RAM pages. */
static CPUWriteMemoryFunc * const notdirty_mem_write[3] = {
    notdirty_mem_writeb,
    notdirty_mem_writew,
    notdirty_mem_writel,
};
30321ccde1cbSbellard 
/* Generate a debug exception if a watchpoint has been hit.
   'offset' is the in-page offset of the access, 'len_mask' encodes the
   access width, 'flags' is BP_MEM_READ/BP_MEM_WRITE. */
static void check_watchpoint(int offset, int len_mask, int flags)
{
    CPUState *env = cpu_single_env;
    target_ulong pc, cs_base;
    TranslationBlock *tb;
    target_ulong vaddr;
    CPUWatchpoint *wp;
    int cpu_flags;

    if (env->watchpoint_hit) {
        /* We re-entered the check after replacing the TB. Now raise
         * the debug interrupt so that it will trigger after the
         * current instruction. */
        cpu_interrupt(env, CPU_INTERRUPT_DEBUG);
        return;
    }
    /* Reconstruct the guest virtual address of the access from the page
       recorded at TLB-fill time plus the in-page offset. */
    vaddr = (env->mem_io_vaddr & TARGET_PAGE_MASK) + offset;
    QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
        if ((vaddr == (wp->vaddr & len_mask) ||
             (vaddr & wp->len_mask) == wp->vaddr) && (wp->flags & flags)) {
            wp->flags |= BP_WATCHPOINT_HIT;
            if (!env->watchpoint_hit) {
                env->watchpoint_hit = wp;
                tb = tb_find_pc(env->mem_io_pc);
                if (!tb) {
                    cpu_abort(env, "check_watchpoint: could not find TB for "
                              "pc=%p", (void *)env->mem_io_pc);
                }
                /* Roll CPU state back to the start of the faulting
                   instruction, then discard the TB containing it. */
                cpu_restore_state(tb, env, env->mem_io_pc, NULL);
                tb_phys_invalidate(tb, -1);
                if (wp->flags & BP_STOP_BEFORE_ACCESS) {
                    env->exception_index = EXCP_DEBUG;
                } else {
                    /* Retranslate a single-instruction TB so the access
                       completes before the debug exception is taken. */
                    cpu_get_tb_cpu_state(env, &pc, &cs_base, &cpu_flags);
                    tb_gen_code(env, pc, cs_base, cpu_flags, 1);
                }
                cpu_resume_from_signal(env, NULL);
            }
        } else {
            wp->flags &= ~BP_WATCHPOINT_HIT;
        }
    }
}
30770f459d16Spbrook 
30786658ffb8Spbrook /* Watchpoint access routines.  Watchpoints are inserted using TLB tricks,
30796658ffb8Spbrook    so these check for a hit then pass through to the normal out-of-line
30806658ffb8Spbrook    phys routines.  */
3081c227f099SAnthony Liguori static uint32_t watch_mem_readb(void *opaque, target_phys_addr_t addr)
30826658ffb8Spbrook {
3083b4051334Saliguori     check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x0, BP_MEM_READ);
30846658ffb8Spbrook     return ldub_phys(addr);
30856658ffb8Spbrook }
30866658ffb8Spbrook 
3087c227f099SAnthony Liguori static uint32_t watch_mem_readw(void *opaque, target_phys_addr_t addr)
30886658ffb8Spbrook {
3089b4051334Saliguori     check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x1, BP_MEM_READ);
30906658ffb8Spbrook     return lduw_phys(addr);
30916658ffb8Spbrook }
30926658ffb8Spbrook 
3093c227f099SAnthony Liguori static uint32_t watch_mem_readl(void *opaque, target_phys_addr_t addr)
30946658ffb8Spbrook {
3095b4051334Saliguori     check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x3, BP_MEM_READ);
30966658ffb8Spbrook     return ldl_phys(addr);
30976658ffb8Spbrook }
30986658ffb8Spbrook 
3099c227f099SAnthony Liguori static void watch_mem_writeb(void *opaque, target_phys_addr_t addr,
31006658ffb8Spbrook                              uint32_t val)
31016658ffb8Spbrook {
3102b4051334Saliguori     check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x0, BP_MEM_WRITE);
31036658ffb8Spbrook     stb_phys(addr, val);
31046658ffb8Spbrook }
31056658ffb8Spbrook 
3106c227f099SAnthony Liguori static void watch_mem_writew(void *opaque, target_phys_addr_t addr,
31076658ffb8Spbrook                              uint32_t val)
31086658ffb8Spbrook {
3109b4051334Saliguori     check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x1, BP_MEM_WRITE);
31106658ffb8Spbrook     stw_phys(addr, val);
31116658ffb8Spbrook }
31126658ffb8Spbrook 
3113c227f099SAnthony Liguori static void watch_mem_writel(void *opaque, target_phys_addr_t addr,
31146658ffb8Spbrook                              uint32_t val)
31156658ffb8Spbrook {
3116b4051334Saliguori     check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x3, BP_MEM_WRITE);
31176658ffb8Spbrook     stl_phys(addr, val);
31186658ffb8Spbrook }
31196658ffb8Spbrook 
/* Read dispatch table (byte/word/long) for the watchpoint I/O slot
   registered in io_mem_init().  */
static CPUReadMemoryFunc * const watch_mem_read[3] = {
    watch_mem_readb,
    watch_mem_readw,
    watch_mem_readl,
};
31256658ffb8Spbrook 
/* Write dispatch table (byte/word/long) for the watchpoint I/O slot
   registered in io_mem_init().  */
static CPUWriteMemoryFunc * const watch_mem_write[3] = {
    watch_mem_writeb,
    watch_mem_writew,
    watch_mem_writel,
};
31316658ffb8Spbrook 
/* Dispatch a read of size index 'len' (0 = byte, 1 = word, 2 = long)
   on a subpage container to the I/O handler registered for the
   sub-region containing 'addr'.  */
static inline uint32_t subpage_readlen (subpage_t *mmio,
                                        target_phys_addr_t addr,
                                        unsigned int len)
{
    unsigned int idx = SUBPAGE_IDX(addr);
#if defined(DEBUG_SUBPAGE)
    printf("%s: subpage %p len %d addr " TARGET_FMT_plx " idx %d\n", __func__,
           mmio, len, addr, idx);
#endif

    /* Rebase the address into the target region, then resolve the
       backing io_mem slot for this sub-page index.  */
    addr += mmio->region_offset[idx];
    idx = mmio->sub_io_index[idx];
    return io_mem_read[idx][len](io_mem_opaque[idx], addr);
}
3146db7b5426Sblueswir1 
/* Dispatch a write of size index 'len' (0 = byte, 1 = word, 2 = long)
   on a subpage container to the I/O handler registered for the
   sub-region containing 'addr'.  */
static inline void subpage_writelen (subpage_t *mmio, target_phys_addr_t addr,
                                     uint32_t value, unsigned int len)
{
    unsigned int idx = SUBPAGE_IDX(addr);
#if defined(DEBUG_SUBPAGE)
    printf("%s: subpage %p len %d addr " TARGET_FMT_plx " idx %d value %08x\n",
           __func__, mmio, len, addr, idx, value);
#endif

    /* Rebase the address into the target region, then resolve the
       backing io_mem slot for this sub-page index.  */
    addr += mmio->region_offset[idx];
    idx = mmio->sub_io_index[idx];
    io_mem_write[idx][len](io_mem_opaque[idx], addr, value);
}
3160db7b5426Sblueswir1 
3161c227f099SAnthony Liguori static uint32_t subpage_readb (void *opaque, target_phys_addr_t addr)
3162db7b5426Sblueswir1 {
3163db7b5426Sblueswir1     return subpage_readlen(opaque, addr, 0);
3164db7b5426Sblueswir1 }
3165db7b5426Sblueswir1 
3166c227f099SAnthony Liguori static void subpage_writeb (void *opaque, target_phys_addr_t addr,
3167db7b5426Sblueswir1                             uint32_t value)
3168db7b5426Sblueswir1 {
3169db7b5426Sblueswir1     subpage_writelen(opaque, addr, value, 0);
3170db7b5426Sblueswir1 }
3171db7b5426Sblueswir1 
3172c227f099SAnthony Liguori static uint32_t subpage_readw (void *opaque, target_phys_addr_t addr)
3173db7b5426Sblueswir1 {
3174db7b5426Sblueswir1     return subpage_readlen(opaque, addr, 1);
3175db7b5426Sblueswir1 }
3176db7b5426Sblueswir1 
3177c227f099SAnthony Liguori static void subpage_writew (void *opaque, target_phys_addr_t addr,
3178db7b5426Sblueswir1                             uint32_t value)
3179db7b5426Sblueswir1 {
3180db7b5426Sblueswir1     subpage_writelen(opaque, addr, value, 1);
3181db7b5426Sblueswir1 }
3182db7b5426Sblueswir1 
3183c227f099SAnthony Liguori static uint32_t subpage_readl (void *opaque, target_phys_addr_t addr)
3184db7b5426Sblueswir1 {
3185db7b5426Sblueswir1     return subpage_readlen(opaque, addr, 2);
3186db7b5426Sblueswir1 }
3187db7b5426Sblueswir1 
3188f6405247SRichard Henderson static void subpage_writel (void *opaque, target_phys_addr_t addr,
3189f6405247SRichard Henderson                             uint32_t value)
3190db7b5426Sblueswir1 {
3191db7b5426Sblueswir1     subpage_writelen(opaque, addr, value, 2);
3192db7b5426Sblueswir1 }
3193db7b5426Sblueswir1 
/* Read dispatch table (byte/word/long) for subpage containers.  */
static CPUReadMemoryFunc * const subpage_read[] = {
    &subpage_readb,
    &subpage_readw,
    &subpage_readl,
};
3199db7b5426Sblueswir1 
/* Write dispatch table (byte/word/long) for subpage containers.  */
static CPUWriteMemoryFunc * const subpage_write[] = {
    &subpage_writeb,
    &subpage_writew,
    &subpage_writel,
};
3205db7b5426Sblueswir1 
/* Route the page-relative byte range [start, end] (inclusive) of a
   subpage container to the I/O slot encoded in 'memory', with the
   given region offset.  Returns 0 on success, -1 if either bound
   falls outside the page.  */
static int subpage_register (subpage_t *mmio, uint32_t start, uint32_t end,
                             ram_addr_t memory, ram_addr_t region_offset)
{
    int idx, eidx;

    if (start >= TARGET_PAGE_SIZE || end >= TARGET_PAGE_SIZE)
        return -1;
    idx = SUBPAGE_IDX(start);
    eidx = SUBPAGE_IDX(end);
#if defined(DEBUG_SUBPAGE)
    printf("%s: %p start %08x end %08x idx %08x eidx %08x mem %ld\n", __func__,
           mmio, start, end, idx, eidx, memory);
#endif
    /* Decode the encoded phys_offset into a plain io_mem table index.  */
    memory = (memory >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
    for (; idx <= eidx; idx++) {
        mmio->sub_io_index[idx] = memory;
        mmio->region_offset[idx] = region_offset;
    }

    return 0;
}
3227db7b5426Sblueswir1 
/* Allocate a subpage container covering the target page at 'base',
   register it as an I/O slot, and store the encoded slot (tagged
   with IO_MEM_SUBPAGE) into *phys.  The whole page initially
   forwards to 'orig_memory' / 'region_offset'.  */
static subpage_t *subpage_init (target_phys_addr_t base, ram_addr_t *phys,
                                ram_addr_t orig_memory,
                                ram_addr_t region_offset)
{
    subpage_t *mmio;
    int subpage_memory;

    mmio = qemu_mallocz(sizeof(subpage_t));

    mmio->base = base;
    subpage_memory = cpu_register_io_memory(subpage_read, subpage_write, mmio);
#if defined(DEBUG_SUBPAGE)
    printf("%s: %p base " TARGET_FMT_plx " len %08x %d\n", __func__,
           mmio, base, TARGET_PAGE_SIZE, subpage_memory);
#endif
    *phys = subpage_memory | IO_MEM_SUBPAGE;
    subpage_register(mmio, 0, TARGET_PAGE_SIZE-1, orig_memory, region_offset);

    return mmio;
}
3248db7b5426Sblueswir1 
324988715657Saliguori static int get_free_io_mem_idx(void)
325088715657Saliguori {
325188715657Saliguori     int i;
325288715657Saliguori 
325388715657Saliguori     for (i = 0; i<IO_MEM_NB_ENTRIES; i++)
325488715657Saliguori         if (!io_mem_used[i]) {
325588715657Saliguori             io_mem_used[i] = 1;
325688715657Saliguori             return i;
325788715657Saliguori         }
3258c6703b47SRiku Voipio     fprintf(stderr, "RAN out out io_mem_idx, max %d !\n", IO_MEM_NB_ENTRIES);
325988715657Saliguori     return -1;
326088715657Saliguori }
326188715657Saliguori 
/* mem_read and mem_write are arrays of functions containing the
   function to access byte (index 0), word (index 1) and dword (index
   2). Functions can be omitted with a NULL function pointer.
   If io_index is non zero, the corresponding io zone is
   modified. If it is zero, a new io zone is allocated. The return
   value can be used with cpu_register_physical_memory(). (-1) is
   returned on error. */
static int cpu_register_io_memory_fixed(int io_index,
                                        CPUReadMemoryFunc * const *mem_read,
                                        CPUWriteMemoryFunc * const *mem_write,
                                        void *opaque)
{
    int i;

    if (io_index <= 0) {
        /* Allocate a fresh slot from the table.  */
        io_index = get_free_io_mem_idx();
        if (io_index == -1)
            return io_index;
    } else {
        /* Caller supplied an encoded table address; decode and bound-check. */
        io_index >>= IO_MEM_SHIFT;
        if (io_index >= IO_MEM_NB_ENTRIES)
            return -1;
    }

    /* NULL handler entries fall back to the "unassigned" handlers.  */
    for (i = 0; i < 3; ++i) {
        io_mem_read[io_index][i]
            = (mem_read[i] ? mem_read[i] : unassigned_mem_read[i]);
    }
    for (i = 0; i < 3; ++i) {
        io_mem_write[io_index][i]
            = (mem_write[i] ? mem_write[i] : unassigned_mem_write[i]);
    }
    io_mem_opaque[io_index] = opaque;

    return (io_index << IO_MEM_SHIFT);
}
329861382a50Sbellard 
/* Allocate a new I/O slot for the given byte/word/long handlers and
   return its encoded table address (usable with
   cpu_register_physical_memory()), or -1 on error.  */
int cpu_register_io_memory(CPUReadMemoryFunc * const *mem_read,
                           CPUWriteMemoryFunc * const *mem_write,
                           void *opaque)
{
    return cpu_register_io_memory_fixed(0, mem_read, mem_write, opaque);
}
33051eed09cbSAvi Kivity 
330688715657Saliguori void cpu_unregister_io_memory(int io_table_address)
330788715657Saliguori {
330888715657Saliguori     int i;
330988715657Saliguori     int io_index = io_table_address >> IO_MEM_SHIFT;
331088715657Saliguori 
331188715657Saliguori     for (i=0;i < 3; i++) {
331288715657Saliguori         io_mem_read[io_index][i] = unassigned_mem_read[i];
331388715657Saliguori         io_mem_write[io_index][i] = unassigned_mem_write[i];
331488715657Saliguori     }
331588715657Saliguori     io_mem_opaque[io_index] = NULL;
331688715657Saliguori     io_mem_used[io_index] = 0;
331788715657Saliguori }
331888715657Saliguori 
/* Set up the fixed I/O slots (ROM, unassigned, not-dirty) and the
   watchpoint slot.  */
static void io_mem_init(void)
{
    int i;

    /* ROM and not-dirty pages use error_mem_read (entries are never
       called); writes to not-dirty RAM go through the dirty-tracking
       handlers.  */
    cpu_register_io_memory_fixed(IO_MEM_ROM, error_mem_read, unassigned_mem_write, NULL);
    cpu_register_io_memory_fixed(IO_MEM_UNASSIGNED, unassigned_mem_read, unassigned_mem_write, NULL);
    cpu_register_io_memory_fixed(IO_MEM_NOTDIRTY, error_mem_read, notdirty_mem_write, NULL);
    /* Reserve the first five entries so dynamic allocation never hands
       out a fixed slot.  NOTE(review): assumes the IO_MEM_* encodings
       above decode to indices < 5 -- confirm against IO_MEM_SHIFT.  */
    for (i=0; i<5; i++)
        io_mem_used[i] = 1;

    io_mem_watch = cpu_register_io_memory(watch_mem_read,
                                          watch_mem_write, NULL);
}
3332e9179ce1SAvi Kivity 
3333e2eef170Spbrook #endif /* !defined(CONFIG_USER_ONLY) */
3334e2eef170Spbrook 
333513eb76e0Sbellard /* physical memory access (slow version, mainly for debug) */
333613eb76e0Sbellard #if defined(CONFIG_USER_ONLY)
/* Copy 'len' bytes between guest virtual memory at 'addr' and 'buf',
   page by page, honouring the user-mode page protection flags.
   Returns 0 on success, -1 if any page in the range is unmapped or
   lacks the required access right.  */
int cpu_memory_rw_debug(CPUState *env, target_ulong addr,
                        uint8_t *buf, int len, int is_write)
{
    int l, flags;
    target_ulong page;
    void * p;

    while (len > 0) {
        page = addr & TARGET_PAGE_MASK;
        /* Bytes remaining on this page, clipped to the request.  */
        l = (page + TARGET_PAGE_SIZE) - addr;
        if (l > len)
            l = len;
        flags = page_get_flags(page);
        if (!(flags & PAGE_VALID))
            return -1;
        if (is_write) {
            if (!(flags & PAGE_WRITE))
                return -1;
            /* XXX: this code should not depend on lock_user */
            if (!(p = lock_user(VERIFY_WRITE, addr, l, 0)))
                return -1;
            memcpy(p, buf, l);
            unlock_user(p, addr, l);
        } else {
            if (!(flags & PAGE_READ))
                return -1;
            /* XXX: this code should not depend on lock_user */
            if (!(p = lock_user(VERIFY_READ, addr, l, 1)))
                return -1;
            memcpy(buf, p, l);
            unlock_user(p, addr, 0);
        }
        len -= l;
        buf += l;
        addr += l;
    }
    return 0;
}
33758df1cd07Sbellard 
337613eb76e0Sbellard #else
/* Physical memory access (slow path): copy 'len' bytes between 'buf'
   and guest physical memory at 'addr'.  RAM pages are accessed with
   memcpy (with code invalidation and dirty tracking on writes); MMIO
   pages go through the io_mem handlers using the widest naturally
   aligned access (4/2/1 bytes).  */
void cpu_physical_memory_rw(target_phys_addr_t addr, uint8_t *buf,
                            int len, int is_write)
{
    int l, io_index;
    uint8_t *ptr;
    uint32_t val;
    target_phys_addr_t page;
    unsigned long pd;
    PhysPageDesc *p;

    while (len > 0) {
        page = addr & TARGET_PAGE_MASK;
        /* Bytes remaining on this page, clipped to the request.  */
        l = (page + TARGET_PAGE_SIZE) - addr;
        if (l > len)
            l = len;
        p = phys_page_find(page >> TARGET_PAGE_BITS);
        if (!p) {
            pd = IO_MEM_UNASSIGNED;
        } else {
            pd = p->phys_offset;
        }

        if (is_write) {
            if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
                target_phys_addr_t addr1 = addr;
                io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
                if (p)
                    addr1 = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
                /* XXX: could force cpu_single_env to NULL to avoid
                   potential bugs */
                if (l >= 4 && ((addr1 & 3) == 0)) {
                    /* 32 bit write access */
                    val = ldl_p(buf);
                    io_mem_write[io_index][2](io_mem_opaque[io_index], addr1, val);
                    l = 4;
                } else if (l >= 2 && ((addr1 & 1) == 0)) {
                    /* 16 bit write access */
                    val = lduw_p(buf);
                    io_mem_write[io_index][1](io_mem_opaque[io_index], addr1, val);
                    l = 2;
                } else {
                    /* 8 bit write access */
                    val = ldub_p(buf);
                    io_mem_write[io_index][0](io_mem_opaque[io_index], addr1, val);
                    l = 1;
                }
            } else {
                unsigned long addr1;
                addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
                /* RAM case */
                ptr = qemu_get_ram_ptr(addr1);
                memcpy(ptr, buf, l);
                if (!cpu_physical_memory_is_dirty(addr1)) {
                    /* invalidate code */
                    tb_invalidate_phys_page_range(addr1, addr1 + l, 0);
                    /* set dirty bit */
                    cpu_physical_memory_set_dirty_flags(
                        addr1, (0xff & ~CODE_DIRTY_FLAG));
                }
            }
        } else {
            /* Reads treat everything above IO_MEM_ROM that is not a
               ROM-device (IO_MEM_ROMD) page as I/O.  */
            if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
                !(pd & IO_MEM_ROMD)) {
                target_phys_addr_t addr1 = addr;
                /* I/O case */
                io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
                if (p)
                    addr1 = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
                if (l >= 4 && ((addr1 & 3) == 0)) {
                    /* 32 bit read access */
                    val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr1);
                    stl_p(buf, val);
                    l = 4;
                } else if (l >= 2 && ((addr1 & 1) == 0)) {
                    /* 16 bit read access */
                    val = io_mem_read[io_index][1](io_mem_opaque[io_index], addr1);
                    stw_p(buf, val);
                    l = 2;
                } else {
                    /* 8 bit read access */
                    val = io_mem_read[io_index][0](io_mem_opaque[io_index], addr1);
                    stb_p(buf, val);
                    l = 1;
                }
            } else {
                /* RAM case */
                ptr = qemu_get_ram_ptr(pd & TARGET_PAGE_MASK) +
                    (addr & ~TARGET_PAGE_MASK);
                memcpy(buf, ptr, l);
            }
        }
        len -= l;
        buf += l;
        addr += l;
    }
}
34738df1cd07Sbellard 
/* used for ROM loading : can write in RAM and ROM */
void cpu_physical_memory_write_rom(target_phys_addr_t addr,
                                   const uint8_t *buf, int len)
{
    int l;
    uint8_t *ptr;
    target_phys_addr_t page;
    unsigned long pd;
    PhysPageDesc *p;

    while (len > 0) {
        page = addr & TARGET_PAGE_MASK;
        /* Bytes remaining on this page, clipped to the request.  */
        l = (page + TARGET_PAGE_SIZE) - addr;
        if (l > len)
            l = len;
        p = phys_page_find(page >> TARGET_PAGE_BITS);
        if (!p) {
            pd = IO_MEM_UNASSIGNED;
        } else {
            pd = p->phys_offset;
        }

        /* Silently skip pages that are neither RAM, ROM, nor a
           ROM-device (IO_MEM_ROMD) -- plain MMIO cannot be preloaded. */
        if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM &&
            (pd & ~TARGET_PAGE_MASK) != IO_MEM_ROM &&
            !(pd & IO_MEM_ROMD)) {
            /* do nothing */
        } else {
            unsigned long addr1;
            addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
            /* ROM/RAM case */
            ptr = qemu_get_ram_ptr(addr1);
            memcpy(ptr, buf, l);
        }
        len -= l;
        buf += l;
        addr += l;
    }
}
3512d0ecd2aaSbellard 
/* Fallback buffer used by cpu_physical_memory_map() when the target
   range is not RAM; a non-NULL 'buffer' means the (single, global)
   bounce buffer is in use.  */
typedef struct {
    void *buffer;              /* host memory, or NULL when free */
    target_phys_addr_t addr;   /* guest physical address mapped */
    target_phys_addr_t len;    /* length of the mapped range */
} BounceBuffer;

static BounceBuffer bounce;
35206d16c2f8Saliguori 
/* Callers waiting for map resources (the bounce buffer) register a
   MapClient; cpu_notify_map_clients() invokes each callback when the
   resources are released.  */
typedef struct MapClient {
    void *opaque;                   /* passed back to the callback */
    void (*callback)(void *opaque); /* invoked when mapping may succeed */
    QLIST_ENTRY(MapClient) link;
} MapClient;

static QLIST_HEAD(map_client_list, MapClient) map_client_list
    = QLIST_HEAD_INITIALIZER(map_client_list);
3529ba223c29Saliguori 
3530ba223c29Saliguori void *cpu_register_map_client(void *opaque, void (*callback)(void *opaque))
3531ba223c29Saliguori {
3532ba223c29Saliguori     MapClient *client = qemu_malloc(sizeof(*client));
3533ba223c29Saliguori 
3534ba223c29Saliguori     client->opaque = opaque;
3535ba223c29Saliguori     client->callback = callback;
353672cf2d4fSBlue Swirl     QLIST_INSERT_HEAD(&map_client_list, client, link);
3537ba223c29Saliguori     return client;
3538ba223c29Saliguori }
3539ba223c29Saliguori 
3540ba223c29Saliguori void cpu_unregister_map_client(void *_client)
3541ba223c29Saliguori {
3542ba223c29Saliguori     MapClient *client = (MapClient *)_client;
3543ba223c29Saliguori 
354472cf2d4fSBlue Swirl     QLIST_REMOVE(client, link);
354534d5e948SIsaku Yamahata     qemu_free(client);
3546ba223c29Saliguori }
3547ba223c29Saliguori 
3548ba223c29Saliguori static void cpu_notify_map_clients(void)
3549ba223c29Saliguori {
3550ba223c29Saliguori     MapClient *client;
3551ba223c29Saliguori 
355272cf2d4fSBlue Swirl     while (!QLIST_EMPTY(&map_client_list)) {
355372cf2d4fSBlue Swirl         client = QLIST_FIRST(&map_client_list);
3554ba223c29Saliguori         client->callback(client->opaque);
355534d5e948SIsaku Yamahata         cpu_unregister_map_client(client);
3556ba223c29Saliguori     }
3557ba223c29Saliguori }
3558ba223c29Saliguori 
/* Map a physical memory region into a host virtual address.
 * May map a subset of the requested range, given by and returned in *plen.
 * May return NULL if resources needed to perform the mapping are exhausted.
 * Use only for reads OR writes - not for read-modify-write operations.
 * Use cpu_register_map_client() to know when retrying the map operation is
 * likely to succeed.
 */
void *cpu_physical_memory_map(target_phys_addr_t addr,
                              target_phys_addr_t *plen,
                              int is_write)
{
    target_phys_addr_t len = *plen;
    target_phys_addr_t done = 0;
    int l;
    uint8_t *ret = NULL;
    uint8_t *ptr;
    target_phys_addr_t page;
    unsigned long pd;
    PhysPageDesc *p;
    unsigned long addr1;

    while (len > 0) {
        page = addr & TARGET_PAGE_MASK;
        /* Bytes remaining on this page, clipped to the request.  */
        l = (page + TARGET_PAGE_SIZE) - addr;
        if (l > len)
            l = len;
        p = phys_page_find(page >> TARGET_PAGE_BITS);
        if (!p) {
            pd = IO_MEM_UNASSIGNED;
        } else {
            pd = p->phys_offset;
        }

        if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
            /* Not RAM: use the single global bounce buffer, but only
               for the first page of a mapping and only if it is free. */
            if (done || bounce.buffer) {
                break;
            }
            bounce.buffer = qemu_memalign(TARGET_PAGE_SIZE, TARGET_PAGE_SIZE);
            bounce.addr = addr;
            bounce.len = l;
            if (!is_write) {
                /* Pre-fill the buffer for a read mapping.  */
                cpu_physical_memory_rw(addr, bounce.buffer, l, 0);
            }
            ptr = bounce.buffer;
        } else {
            addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
            ptr = qemu_get_ram_ptr(addr1);
        }
        if (!done) {
            ret = ptr;
        } else if (ret + done != ptr) {
            /* Host pointers are no longer contiguous: stop here and
               report the shorter mapping through *plen.  */
            break;
        }

        len -= l;
        addr += l;
        done += l;
    }
    *plen = done;
    return ret;
}
36206d16c2f8Saliguori 
/* Unmaps a memory region previously mapped by cpu_physical_memory_map().
 * Will also mark the memory as dirty if is_write == 1.  access_len gives
 * the amount of memory that was actually read or written by the caller.
 */
void cpu_physical_memory_unmap(void *buffer, target_phys_addr_t len,
                               int is_write, target_phys_addr_t access_len)
{
    if (buffer != bounce.buffer) {
        /* Direct RAM mapping: nothing to copy back, but written pages
           need code invalidation and dirty-flag updates.  */
        if (is_write) {
            ram_addr_t addr1 = qemu_ram_addr_from_host(buffer);
            while (access_len) {
                unsigned l;
                l = TARGET_PAGE_SIZE;
                if (l > access_len)
                    l = access_len;
                if (!cpu_physical_memory_is_dirty(addr1)) {
                    /* invalidate code */
                    tb_invalidate_phys_page_range(addr1, addr1 + l, 0);
                    /* set dirty bit */
                    cpu_physical_memory_set_dirty_flags(
                        addr1, (0xff & ~CODE_DIRTY_FLAG));
                }
                addr1 += l;
                access_len -= l;
            }
        }
        return;
    }
    /* Bounce-buffer mapping: flush writes back to the device, release
       the buffer and wake any clients waiting to map.  */
    if (is_write) {
        cpu_physical_memory_write(bounce.addr, bounce.buffer, access_len);
    }
    qemu_vfree(bounce.buffer);
    bounce.buffer = NULL;
    cpu_notify_map_clients();
}
3656d0ecd2aaSbellard 
36578df1cd07Sbellard /* warning: addr must be aligned */
3658c227f099SAnthony Liguori uint32_t ldl_phys(target_phys_addr_t addr)
36598df1cd07Sbellard {
36608df1cd07Sbellard     int io_index;
36618df1cd07Sbellard     uint8_t *ptr;
36628df1cd07Sbellard     uint32_t val;
36638df1cd07Sbellard     unsigned long pd;
36648df1cd07Sbellard     PhysPageDesc *p;
36658df1cd07Sbellard 
36668df1cd07Sbellard     p = phys_page_find(addr >> TARGET_PAGE_BITS);
36678df1cd07Sbellard     if (!p) {
36688df1cd07Sbellard         pd = IO_MEM_UNASSIGNED;
36698df1cd07Sbellard     } else {
36708df1cd07Sbellard         pd = p->phys_offset;
36718df1cd07Sbellard     }
36728df1cd07Sbellard 
36732a4188a3Sbellard     if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
36742a4188a3Sbellard         !(pd & IO_MEM_ROMD)) {
36758df1cd07Sbellard         /* I/O case */
36768df1cd07Sbellard         io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
36778da3ff18Spbrook         if (p)
36788da3ff18Spbrook             addr = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
36798df1cd07Sbellard         val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr);
36808df1cd07Sbellard     } else {
36818df1cd07Sbellard         /* RAM case */
36825579c7f3Spbrook         ptr = qemu_get_ram_ptr(pd & TARGET_PAGE_MASK) +
36838df1cd07Sbellard             (addr & ~TARGET_PAGE_MASK);
36848df1cd07Sbellard         val = ldl_p(ptr);
36858df1cd07Sbellard     }
36868df1cd07Sbellard     return val;
36878df1cd07Sbellard }
36888df1cd07Sbellard 
368984b7b8e7Sbellard /* warning: addr must be aligned */
3690c227f099SAnthony Liguori uint64_t ldq_phys(target_phys_addr_t addr)
369184b7b8e7Sbellard {
369284b7b8e7Sbellard     int io_index;
369384b7b8e7Sbellard     uint8_t *ptr;
369484b7b8e7Sbellard     uint64_t val;
369584b7b8e7Sbellard     unsigned long pd;
369684b7b8e7Sbellard     PhysPageDesc *p;
369784b7b8e7Sbellard 
369884b7b8e7Sbellard     p = phys_page_find(addr >> TARGET_PAGE_BITS);
369984b7b8e7Sbellard     if (!p) {
370084b7b8e7Sbellard         pd = IO_MEM_UNASSIGNED;
370184b7b8e7Sbellard     } else {
370284b7b8e7Sbellard         pd = p->phys_offset;
370384b7b8e7Sbellard     }
370484b7b8e7Sbellard 
37052a4188a3Sbellard     if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
37062a4188a3Sbellard         !(pd & IO_MEM_ROMD)) {
370784b7b8e7Sbellard         /* I/O case */
370884b7b8e7Sbellard         io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
37098da3ff18Spbrook         if (p)
37108da3ff18Spbrook             addr = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
371184b7b8e7Sbellard #ifdef TARGET_WORDS_BIGENDIAN
371284b7b8e7Sbellard         val = (uint64_t)io_mem_read[io_index][2](io_mem_opaque[io_index], addr) << 32;
371384b7b8e7Sbellard         val |= io_mem_read[io_index][2](io_mem_opaque[io_index], addr + 4);
371484b7b8e7Sbellard #else
371584b7b8e7Sbellard         val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr);
371684b7b8e7Sbellard         val |= (uint64_t)io_mem_read[io_index][2](io_mem_opaque[io_index], addr + 4) << 32;
371784b7b8e7Sbellard #endif
371884b7b8e7Sbellard     } else {
371984b7b8e7Sbellard         /* RAM case */
37205579c7f3Spbrook         ptr = qemu_get_ram_ptr(pd & TARGET_PAGE_MASK) +
372184b7b8e7Sbellard             (addr & ~TARGET_PAGE_MASK);
372284b7b8e7Sbellard         val = ldq_p(ptr);
372384b7b8e7Sbellard     }
372484b7b8e7Sbellard     return val;
372584b7b8e7Sbellard }
372684b7b8e7Sbellard 
3727aab33094Sbellard /* XXX: optimize */
3728c227f099SAnthony Liguori uint32_t ldub_phys(target_phys_addr_t addr)
3729aab33094Sbellard {
3730aab33094Sbellard     uint8_t val;
3731aab33094Sbellard     cpu_physical_memory_read(addr, &val, 1);
3732aab33094Sbellard     return val;
3733aab33094Sbellard }
3734aab33094Sbellard 
3735733f0b02SMichael S. Tsirkin /* warning: addr must be aligned */
3736c227f099SAnthony Liguori uint32_t lduw_phys(target_phys_addr_t addr)
3737aab33094Sbellard {
3738733f0b02SMichael S. Tsirkin     int io_index;
3739733f0b02SMichael S. Tsirkin     uint8_t *ptr;
3740733f0b02SMichael S. Tsirkin     uint64_t val;
3741733f0b02SMichael S. Tsirkin     unsigned long pd;
3742733f0b02SMichael S. Tsirkin     PhysPageDesc *p;
3743733f0b02SMichael S. Tsirkin 
3744733f0b02SMichael S. Tsirkin     p = phys_page_find(addr >> TARGET_PAGE_BITS);
3745733f0b02SMichael S. Tsirkin     if (!p) {
3746733f0b02SMichael S. Tsirkin         pd = IO_MEM_UNASSIGNED;
3747733f0b02SMichael S. Tsirkin     } else {
3748733f0b02SMichael S. Tsirkin         pd = p->phys_offset;
3749733f0b02SMichael S. Tsirkin     }
3750733f0b02SMichael S. Tsirkin 
3751733f0b02SMichael S. Tsirkin     if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
3752733f0b02SMichael S. Tsirkin         !(pd & IO_MEM_ROMD)) {
3753733f0b02SMichael S. Tsirkin         /* I/O case */
3754733f0b02SMichael S. Tsirkin         io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
3755733f0b02SMichael S. Tsirkin         if (p)
3756733f0b02SMichael S. Tsirkin             addr = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
3757733f0b02SMichael S. Tsirkin         val = io_mem_read[io_index][1](io_mem_opaque[io_index], addr);
3758733f0b02SMichael S. Tsirkin     } else {
3759733f0b02SMichael S. Tsirkin         /* RAM case */
3760733f0b02SMichael S. Tsirkin         ptr = qemu_get_ram_ptr(pd & TARGET_PAGE_MASK) +
3761733f0b02SMichael S. Tsirkin             (addr & ~TARGET_PAGE_MASK);
3762733f0b02SMichael S. Tsirkin         val = lduw_p(ptr);
3763733f0b02SMichael S. Tsirkin     }
3764733f0b02SMichael S. Tsirkin     return val;
3765aab33094Sbellard }
3766aab33094Sbellard 
37678df1cd07Sbellard /* warning: addr must be aligned. The ram page is not masked as dirty
37688df1cd07Sbellard    and the code inside is not invalidated. It is useful if the dirty
37698df1cd07Sbellard    bits are used to track modified PTEs */
3770c227f099SAnthony Liguori void stl_phys_notdirty(target_phys_addr_t addr, uint32_t val)
37718df1cd07Sbellard {
37728df1cd07Sbellard     int io_index;
37738df1cd07Sbellard     uint8_t *ptr;
37748df1cd07Sbellard     unsigned long pd;
37758df1cd07Sbellard     PhysPageDesc *p;
37768df1cd07Sbellard 
37778df1cd07Sbellard     p = phys_page_find(addr >> TARGET_PAGE_BITS);
37788df1cd07Sbellard     if (!p) {
37798df1cd07Sbellard         pd = IO_MEM_UNASSIGNED;
37808df1cd07Sbellard     } else {
37818df1cd07Sbellard         pd = p->phys_offset;
37828df1cd07Sbellard     }
37838df1cd07Sbellard 
37843a7d929eSbellard     if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
37858df1cd07Sbellard         io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
37868da3ff18Spbrook         if (p)
37878da3ff18Spbrook             addr = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
37888df1cd07Sbellard         io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
37898df1cd07Sbellard     } else {
379074576198Saliguori         unsigned long addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
37915579c7f3Spbrook         ptr = qemu_get_ram_ptr(addr1);
37928df1cd07Sbellard         stl_p(ptr, val);
379374576198Saliguori 
379474576198Saliguori         if (unlikely(in_migration)) {
379574576198Saliguori             if (!cpu_physical_memory_is_dirty(addr1)) {
379674576198Saliguori                 /* invalidate code */
379774576198Saliguori                 tb_invalidate_phys_page_range(addr1, addr1 + 4, 0);
379874576198Saliguori                 /* set dirty bit */
3799f7c11b53SYoshiaki Tamura                 cpu_physical_memory_set_dirty_flags(
3800f7c11b53SYoshiaki Tamura                     addr1, (0xff & ~CODE_DIRTY_FLAG));
380174576198Saliguori             }
380274576198Saliguori         }
38038df1cd07Sbellard     }
38048df1cd07Sbellard }
38058df1cd07Sbellard 
3806c227f099SAnthony Liguori void stq_phys_notdirty(target_phys_addr_t addr, uint64_t val)
3807bc98a7efSj_mayer {
3808bc98a7efSj_mayer     int io_index;
3809bc98a7efSj_mayer     uint8_t *ptr;
3810bc98a7efSj_mayer     unsigned long pd;
3811bc98a7efSj_mayer     PhysPageDesc *p;
3812bc98a7efSj_mayer 
3813bc98a7efSj_mayer     p = phys_page_find(addr >> TARGET_PAGE_BITS);
3814bc98a7efSj_mayer     if (!p) {
3815bc98a7efSj_mayer         pd = IO_MEM_UNASSIGNED;
3816bc98a7efSj_mayer     } else {
3817bc98a7efSj_mayer         pd = p->phys_offset;
3818bc98a7efSj_mayer     }
3819bc98a7efSj_mayer 
3820bc98a7efSj_mayer     if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
3821bc98a7efSj_mayer         io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
38228da3ff18Spbrook         if (p)
38238da3ff18Spbrook             addr = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
3824bc98a7efSj_mayer #ifdef TARGET_WORDS_BIGENDIAN
3825bc98a7efSj_mayer         io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val >> 32);
3826bc98a7efSj_mayer         io_mem_write[io_index][2](io_mem_opaque[io_index], addr + 4, val);
3827bc98a7efSj_mayer #else
3828bc98a7efSj_mayer         io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
3829bc98a7efSj_mayer         io_mem_write[io_index][2](io_mem_opaque[io_index], addr + 4, val >> 32);
3830bc98a7efSj_mayer #endif
3831bc98a7efSj_mayer     } else {
38325579c7f3Spbrook         ptr = qemu_get_ram_ptr(pd & TARGET_PAGE_MASK) +
3833bc98a7efSj_mayer             (addr & ~TARGET_PAGE_MASK);
3834bc98a7efSj_mayer         stq_p(ptr, val);
3835bc98a7efSj_mayer     }
3836bc98a7efSj_mayer }
3837bc98a7efSj_mayer 
38388df1cd07Sbellard /* warning: addr must be aligned */
3839c227f099SAnthony Liguori void stl_phys(target_phys_addr_t addr, uint32_t val)
38408df1cd07Sbellard {
38418df1cd07Sbellard     int io_index;
38428df1cd07Sbellard     uint8_t *ptr;
38438df1cd07Sbellard     unsigned long pd;
38448df1cd07Sbellard     PhysPageDesc *p;
38458df1cd07Sbellard 
38468df1cd07Sbellard     p = phys_page_find(addr >> TARGET_PAGE_BITS);
38478df1cd07Sbellard     if (!p) {
38488df1cd07Sbellard         pd = IO_MEM_UNASSIGNED;
38498df1cd07Sbellard     } else {
38508df1cd07Sbellard         pd = p->phys_offset;
38518df1cd07Sbellard     }
38528df1cd07Sbellard 
38533a7d929eSbellard     if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
38548df1cd07Sbellard         io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
38558da3ff18Spbrook         if (p)
38568da3ff18Spbrook             addr = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
38578df1cd07Sbellard         io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
38588df1cd07Sbellard     } else {
38598df1cd07Sbellard         unsigned long addr1;
38608df1cd07Sbellard         addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
38618df1cd07Sbellard         /* RAM case */
38625579c7f3Spbrook         ptr = qemu_get_ram_ptr(addr1);
38638df1cd07Sbellard         stl_p(ptr, val);
38643a7d929eSbellard         if (!cpu_physical_memory_is_dirty(addr1)) {
38658df1cd07Sbellard             /* invalidate code */
38668df1cd07Sbellard             tb_invalidate_phys_page_range(addr1, addr1 + 4, 0);
38678df1cd07Sbellard             /* set dirty bit */
3868f7c11b53SYoshiaki Tamura             cpu_physical_memory_set_dirty_flags(addr1,
3869f7c11b53SYoshiaki Tamura                 (0xff & ~CODE_DIRTY_FLAG));
38708df1cd07Sbellard         }
38718df1cd07Sbellard     }
38723a7d929eSbellard }
38738df1cd07Sbellard 
3874aab33094Sbellard /* XXX: optimize */
3875c227f099SAnthony Liguori void stb_phys(target_phys_addr_t addr, uint32_t val)
3876aab33094Sbellard {
3877aab33094Sbellard     uint8_t v = val;
3878aab33094Sbellard     cpu_physical_memory_write(addr, &v, 1);
3879aab33094Sbellard }
3880aab33094Sbellard 
3881733f0b02SMichael S. Tsirkin /* warning: addr must be aligned */
3882c227f099SAnthony Liguori void stw_phys(target_phys_addr_t addr, uint32_t val)
3883aab33094Sbellard {
3884733f0b02SMichael S. Tsirkin     int io_index;
3885733f0b02SMichael S. Tsirkin     uint8_t *ptr;
3886733f0b02SMichael S. Tsirkin     unsigned long pd;
3887733f0b02SMichael S. Tsirkin     PhysPageDesc *p;
3888733f0b02SMichael S. Tsirkin 
3889733f0b02SMichael S. Tsirkin     p = phys_page_find(addr >> TARGET_PAGE_BITS);
3890733f0b02SMichael S. Tsirkin     if (!p) {
3891733f0b02SMichael S. Tsirkin         pd = IO_MEM_UNASSIGNED;
3892733f0b02SMichael S. Tsirkin     } else {
3893733f0b02SMichael S. Tsirkin         pd = p->phys_offset;
3894733f0b02SMichael S. Tsirkin     }
3895733f0b02SMichael S. Tsirkin 
3896733f0b02SMichael S. Tsirkin     if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
3897733f0b02SMichael S. Tsirkin         io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
3898733f0b02SMichael S. Tsirkin         if (p)
3899733f0b02SMichael S. Tsirkin             addr = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
3900733f0b02SMichael S. Tsirkin         io_mem_write[io_index][1](io_mem_opaque[io_index], addr, val);
3901733f0b02SMichael S. Tsirkin     } else {
3902733f0b02SMichael S. Tsirkin         unsigned long addr1;
3903733f0b02SMichael S. Tsirkin         addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
3904733f0b02SMichael S. Tsirkin         /* RAM case */
3905733f0b02SMichael S. Tsirkin         ptr = qemu_get_ram_ptr(addr1);
3906733f0b02SMichael S. Tsirkin         stw_p(ptr, val);
3907733f0b02SMichael S. Tsirkin         if (!cpu_physical_memory_is_dirty(addr1)) {
3908733f0b02SMichael S. Tsirkin             /* invalidate code */
3909733f0b02SMichael S. Tsirkin             tb_invalidate_phys_page_range(addr1, addr1 + 2, 0);
3910733f0b02SMichael S. Tsirkin             /* set dirty bit */
3911733f0b02SMichael S. Tsirkin             cpu_physical_memory_set_dirty_flags(addr1,
3912733f0b02SMichael S. Tsirkin                 (0xff & ~CODE_DIRTY_FLAG));
3913733f0b02SMichael S. Tsirkin         }
3914733f0b02SMichael S. Tsirkin     }
3915aab33094Sbellard }
3916aab33094Sbellard 
3917aab33094Sbellard /* XXX: optimize */
3918c227f099SAnthony Liguori void stq_phys(target_phys_addr_t addr, uint64_t val)
3919aab33094Sbellard {
3920aab33094Sbellard     val = tswap64(val);
3921aab33094Sbellard     cpu_physical_memory_write(addr, (const uint8_t *)&val, 8);
3922aab33094Sbellard }
3923aab33094Sbellard 
39245e2972fdSaliguori /* virtual memory access for debug (includes writing to ROM) */
3925b448f2f3Sbellard int cpu_memory_rw_debug(CPUState *env, target_ulong addr,
3926b448f2f3Sbellard                         uint8_t *buf, int len, int is_write)
392713eb76e0Sbellard {
392813eb76e0Sbellard     int l;
3929c227f099SAnthony Liguori     target_phys_addr_t phys_addr;
39309b3c35e0Sj_mayer     target_ulong page;
393113eb76e0Sbellard 
393213eb76e0Sbellard     while (len > 0) {
393313eb76e0Sbellard         page = addr & TARGET_PAGE_MASK;
393413eb76e0Sbellard         phys_addr = cpu_get_phys_page_debug(env, page);
393513eb76e0Sbellard         /* if no physical page mapped, return an error */
393613eb76e0Sbellard         if (phys_addr == -1)
393713eb76e0Sbellard             return -1;
393813eb76e0Sbellard         l = (page + TARGET_PAGE_SIZE) - addr;
393913eb76e0Sbellard         if (l > len)
394013eb76e0Sbellard             l = len;
39415e2972fdSaliguori         phys_addr += (addr & ~TARGET_PAGE_MASK);
39425e2972fdSaliguori         if (is_write)
39435e2972fdSaliguori             cpu_physical_memory_write_rom(phys_addr, buf, l);
39445e2972fdSaliguori         else
39455e2972fdSaliguori             cpu_physical_memory_rw(phys_addr, buf, l, is_write);
394613eb76e0Sbellard         len -= l;
394713eb76e0Sbellard         buf += l;
394813eb76e0Sbellard         addr += l;
394913eb76e0Sbellard     }
395013eb76e0Sbellard     return 0;
395113eb76e0Sbellard }
3952a68fe89cSPaul Brook #endif
395313eb76e0Sbellard 
/* in deterministic execution mode, instructions doing device I/Os
   must be at the end of the TB */
void cpu_io_recompile(CPUState *env, void *retaddr)
{
    TranslationBlock *tb;
    uint32_t n, cflags;
    target_ulong pc, cs_base;
    uint64_t flags;

    /* Locate the translation block that contains the host return
       address of the faulting I/O access. */
    tb = tb_find_pc((unsigned long)retaddr);
    if (!tb) {
        cpu_abort(env, "cpu_io_recompile: could not find TB for pc=%p",
                  retaddr);
    }
    /* Instruction budget at TB entry: what is still pending in
       icount_decr plus what this TB accounts for.  Must be computed
       before cpu_restore_state, which rewinds icount_decr. */
    n = env->icount_decr.u16.low + tb->icount;
    cpu_restore_state(tb, env, (unsigned long)retaddr, NULL);
    /* Calculate how many instructions had been executed before the fault
       occurred.  */
    n = n - env->icount_decr.u16.low;
    /* Generate a new TB ending on the I/O insn.  */
    n++;
    /* On MIPS and SH, delay slot instructions can only be restarted if
       they were already the first instruction in the TB.  If this is not
       the first instruction in a TB then re-execute the preceding
       branch.  */
#if defined(TARGET_MIPS)
    if ((env->hflags & MIPS_HFLAG_BMASK) != 0 && n > 1) {
        /* back the PC up to the branch and refund one instruction */
        env->active_tc.PC -= 4;
        env->icount_decr.u16.low++;
        env->hflags &= ~MIPS_HFLAG_BMASK;
    }
#elif defined(TARGET_SH4)
    if ((env->flags & ((DELAY_SLOT | DELAY_SLOT_CONDITIONAL))) != 0
            && n > 1) {
        env->pc -= 2;
        env->icount_decr.u16.low++;
        env->flags &= ~(DELAY_SLOT | DELAY_SLOT_CONDITIONAL);
    }
#endif
    /* This should never happen.  */
    if (n > CF_COUNT_MASK)
        cpu_abort(env, "TB too big during recompile");

    /* Re-translate the same guest code, capping the TB at n
       instructions with the I/O insn last (CF_LAST_IO). */
    cflags = n | CF_LAST_IO;
    pc = tb->pc;
    cs_base = tb->cs_base;
    flags = tb->flags;
    /* Drop the old TB before generating its replacement. */
    tb_phys_invalidate(tb, -1);
    /* FIXME: In theory this could raise an exception.  In practice
       we have already translated the block once so it's probably ok.  */
    tb_gen_code(env, pc, cs_base, flags, cflags);
    /* TODO: If env->pc != tb->pc (i.e. the faulting instruction was not
       the first in the TB) then we end up generating a whole new TB and
       repeating the fault, which is horribly inefficient.
       Better would be to execute just this insn uncached, or generate a
       second new TB.  */
    cpu_resume_from_signal(env, NULL);
}
40122e70f6efSpbrook 
4013b3755a91SPaul Brook #if !defined(CONFIG_USER_ONLY)
4014b3755a91SPaul Brook 
/* Print translation-cache statistics (TB counts and sizes, page
   crossings, direct-jump chaining rates) through cpu_fprintf, then
   delegate to tcg_dump_info for TCG's own counters. */
void dump_exec_info(FILE *f,
                    int (*cpu_fprintf)(FILE *f, const char *fmt, ...))
{
    int i, target_code_size, max_target_code_size;
    int direct_jmp_count, direct_jmp2_count, cross_page;
    TranslationBlock *tb;

    /* walk every live TB and accumulate aggregate statistics */
    target_code_size = 0;
    max_target_code_size = 0;
    cross_page = 0;
    direct_jmp_count = 0;
    direct_jmp2_count = 0;
    for(i = 0; i < nb_tbs; i++) {
        tb = &tbs[i];
        target_code_size += tb->size;
        if (tb->size > max_target_code_size)
            max_target_code_size = tb->size;
        /* page_addr[1] != -1 means the TB straddles two guest pages */
        if (tb->page_addr[1] != -1)
            cross_page++;
        /* tb_next_offset[] == 0xffff marks an unchained jump slot */
        if (tb->tb_next_offset[0] != 0xffff) {
            direct_jmp_count++;
            if (tb->tb_next_offset[1] != 0xffff) {
                direct_jmp2_count++;
            }
        }
    }
    /* XXX: avoid using doubles ? */
    cpu_fprintf(f, "Translation buffer state:\n");
    cpu_fprintf(f, "gen code size       %ld/%ld\n",
                code_gen_ptr - code_gen_buffer, code_gen_buffer_max_size);
    cpu_fprintf(f, "TB count            %d/%d\n",
                nb_tbs, code_gen_max_blocks);
    cpu_fprintf(f, "TB avg target size  %d max=%d bytes\n",
                nb_tbs ? target_code_size / nb_tbs : 0,
                max_target_code_size);
    cpu_fprintf(f, "TB avg host size    %d bytes (expansion ratio: %0.1f)\n",
                nb_tbs ? (code_gen_ptr - code_gen_buffer) / nb_tbs : 0,
                target_code_size ? (double) (code_gen_ptr - code_gen_buffer) / target_code_size : 0);
    cpu_fprintf(f, "cross page TB count %d (%d%%)\n",
            cross_page,
            nb_tbs ? (cross_page * 100) / nb_tbs : 0);
    cpu_fprintf(f, "direct jump count   %d (%d%%) (2 jumps=%d %d%%)\n",
                direct_jmp_count,
                nb_tbs ? (direct_jmp_count * 100) / nb_tbs : 0,
                direct_jmp2_count,
                nb_tbs ? (direct_jmp2_count * 100) / nb_tbs : 0);
    cpu_fprintf(f, "\nStatistics:\n");
    cpu_fprintf(f, "TB flush count      %d\n", tb_flush_count);
    cpu_fprintf(f, "TB invalidate count %d\n", tb_phys_invalidate_count);
    cpu_fprintf(f, "TLB flush count     %d\n", tlb_flush_count);
    tcg_dump_info(f, cpu_fprintf);
}
4067e3db7226Sbellard 
/* Instantiate the code-access ("_cmmu") softmmu accessors used by the
   translator to fetch guest instructions.  softmmu_template.h expands
   once per access size selected by SHIFT (0=byte, 1=16-bit, 2=32-bit,
   3=64-bit). */
#define MMUSUFFIX _cmmu
/* code fetches have no meaningful host return address */
#define GETPC() NULL
/* the template expects "env"; map it to the current-CPU global */
#define env cpu_single_env
#define SOFTMMU_CODE_ACCESS

#define SHIFT 0
#include "softmmu_template.h"

#define SHIFT 1
#include "softmmu_template.h"

#define SHIFT 2
#include "softmmu_template.h"

#define SHIFT 3
#include "softmmu_template.h"

#undef env

#endif
4088