xref: /qemu/system/physmem.c (revision c5e97233e8c8e53aab1b80b5e9891a71c4edea3e)
154936004Sbellard /*
2fd6ce8f6Sbellard  *  virtual page mapping and translated block handling
354936004Sbellard  *
454936004Sbellard  *  Copyright (c) 2003 Fabrice Bellard
554936004Sbellard  *
654936004Sbellard  * This library is free software; you can redistribute it and/or
754936004Sbellard  * modify it under the terms of the GNU Lesser General Public
854936004Sbellard  * License as published by the Free Software Foundation; either
954936004Sbellard  * version 2 of the License, or (at your option) any later version.
1054936004Sbellard  *
1154936004Sbellard  * This library is distributed in the hope that it will be useful,
1254936004Sbellard  * but WITHOUT ANY WARRANTY; without even the implied warranty of
1354936004Sbellard  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
1454936004Sbellard  * Lesser General Public License for more details.
1554936004Sbellard  *
1654936004Sbellard  * You should have received a copy of the GNU Lesser General Public
1754936004Sbellard  * License along with this library; if not, write to the Free Software
18fad6cb1aSaurel32  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston MA  02110-1301 USA
1954936004Sbellard  */
2067b915a5Sbellard #include "config.h"
21d5a8f07cSbellard #ifdef _WIN32
224fddf62aSths #define WIN32_LEAN_AND_MEAN
23d5a8f07cSbellard #include <windows.h>
24d5a8f07cSbellard #else
25a98d49b1Sbellard #include <sys/types.h>
26d5a8f07cSbellard #include <sys/mman.h>
27d5a8f07cSbellard #endif
2854936004Sbellard #include <stdlib.h>
2954936004Sbellard #include <stdio.h>
3054936004Sbellard #include <stdarg.h>
3154936004Sbellard #include <string.h>
3254936004Sbellard #include <errno.h>
3354936004Sbellard #include <unistd.h>
3454936004Sbellard #include <inttypes.h>
3554936004Sbellard 
366180a181Sbellard #include "cpu.h"
376180a181Sbellard #include "exec-all.h"
38ca10f867Saurel32 #include "qemu-common.h"
39b67d9a52Sbellard #include "tcg.h"
40b3c7724cSpbrook #include "hw/hw.h"
4174576198Saliguori #include "osdep.h"
427ba1e619Saliguori #include "kvm.h"
4353a5960aSpbrook #if defined(CONFIG_USER_ONLY)
4453a5960aSpbrook #include <qemu.h>
4553a5960aSpbrook #endif
4654936004Sbellard 
/* Debug switches: uncomment to enable the corresponding tracing. */
//#define DEBUG_TB_INVALIDATE
//#define DEBUG_FLUSH
//#define DEBUG_TLB
//#define DEBUG_UNASSIGNED

/* make various TB consistency checks */
//#define DEBUG_TB_CHECK
//#define DEBUG_TLB_CHECK

//#define DEBUG_IOPORT
//#define DEBUG_SUBPAGE

#if !defined(CONFIG_USER_ONLY)
/* TB consistency checks only implemented for usermode emulation.  */
#undef DEBUG_TB_CHECK
#endif

/* Number of write lookups on a page before switching to bitmap-based
   self-modifying-code invalidation (see invalidate_page_bitmap). */
#define SMC_BITMAP_USE_THRESHOLD 10

#define MMAP_AREA_START        0x00000000
#define MMAP_AREA_END          0xa8000000

/* Width of the emulated physical address space, selected per target. */
#if defined(TARGET_SPARC64)
#define TARGET_PHYS_ADDR_SPACE_BITS 41
#elif defined(TARGET_SPARC)
#define TARGET_PHYS_ADDR_SPACE_BITS 36
#elif defined(TARGET_ALPHA)
#define TARGET_PHYS_ADDR_SPACE_BITS 42
#define TARGET_VIRT_ADDR_SPACE_BITS 42
#elif defined(TARGET_PPC64)
#define TARGET_PHYS_ADDR_SPACE_BITS 42
#elif defined(TARGET_X86_64) && !defined(USE_KQEMU)
#define TARGET_PHYS_ADDR_SPACE_BITS 42
#elif defined(TARGET_I386) && !defined(USE_KQEMU)
#define TARGET_PHYS_ADDR_SPACE_BITS 36
#else
/* Note: for compatibility with kqemu, we use 32 bits for x86_64 */
#define TARGET_PHYS_ADDR_SPACE_BITS 32
#endif

/* Translation block array (allocated in code_gen_alloc) and the hash
   table indexed by physical PC. */
static TranslationBlock *tbs;
int code_gen_max_blocks;
TranslationBlock *tb_phys_hash[CODE_GEN_PHYS_HASH_SIZE];
static int nb_tbs;
/* any access to the tbs or the page table must use this lock */
spinlock_t tb_lock = SPIN_LOCK_UNLOCKED;

#if defined(__arm__) || defined(__sparc_v9__)
/* The prologue must be reachable with a direct jump. ARM and Sparc64
 have limited branch ranges (possibly also PPC) so place it in a
 section close to code segment. */
#define code_gen_section                                \
    __attribute__((__section__(".gen_code")))           \
    __attribute__((aligned (32)))
#else
#define code_gen_section                                \
    __attribute__((aligned (32)))
#endif

/* Buffer holding generated host code and its bookkeeping pointers. */
uint8_t code_gen_prologue[1024] code_gen_section;
static uint8_t *code_gen_buffer;
static unsigned long code_gen_buffer_size;
/* threshold to flush the translated code buffer */
static unsigned long code_gen_buffer_max_size;
uint8_t *code_gen_ptr;

#if !defined(CONFIG_USER_ONLY)
/* Guest RAM bookkeeping (system emulation only). */
ram_addr_t phys_ram_size;
int phys_ram_fd;
uint8_t *phys_ram_base;
uint8_t *phys_ram_dirty;
static int in_migration;
static ram_addr_t phys_ram_alloc_offset = 0;
#endif

/* Head of the singly-linked list of all CPUs (see cpu_exec_init). */
CPUState *first_cpu;
/* current CPU in the current thread. It is only valid inside
   cpu_exec() */
CPUState *cpu_single_env;
/* 0 = Do not count executed instructions.
   1 = Precise instruction counting.
   2 = Adaptive rate instruction counting.  */
int use_icount = 0;
/* Current instruction counter.  While executing translated code this may
   include some instructions that have not yet been executed.  */
int64_t qemu_icount;
1336a00d601Sbellard 
/* Per-virtual-page descriptor used by the translator. */
typedef struct PageDesc {
    /* list of TBs intersecting this ram page */
    TranslationBlock *first_tb;
    /* in order to optimize self modifying code, we count the number
       of lookups we do to a given page to use a bitmap */
    unsigned int code_write_count;
    uint8_t *code_bitmap;
#if defined(CONFIG_USER_ONLY)
    /* page flags (e.g. PAGE_RESERVED), maintained via page_set_flags */
    unsigned long flags;
#endif
} PageDesc;

/* Per-physical-page descriptor. */
typedef struct PhysPageDesc {
    /* offset in host memory of the page + io_index in the low bits */
    ram_addr_t phys_offset;
    /* offset within the backing region; defaults to the page's own
       physical address (see phys_page_find_alloc) */
    ram_addr_t region_offset;
} PhysPageDesc;
15192e873b9Sbellard 
/* Two-level page table geometry: L2 leaves of 1024 entries each. */
#define L2_BITS 10
#if defined(CONFIG_USER_ONLY) && defined(TARGET_VIRT_ADDR_SPACE_BITS)
/* XXX: this is a temporary hack for alpha target.
 *      In the future, this is to be replaced by a multi-level table
 *      to actually be able to handle the complete 64 bits address space.
 */
#define L1_BITS (TARGET_VIRT_ADDR_SPACE_BITS - L2_BITS - TARGET_PAGE_BITS)
#else
#define L1_BITS (32 - L2_BITS - TARGET_PAGE_BITS)
#endif

#define L1_SIZE (1 << L1_BITS)
#define L2_SIZE (1 << L2_BITS)

/* Host page geometry, filled in by page_init(). */
unsigned long qemu_real_host_page_size;
unsigned long qemu_host_page_bits;
unsigned long qemu_host_page_size;
unsigned long qemu_host_page_mask;

/* XXX: for system emulation, it could just be an array */
static PageDesc *l1_map[L1_SIZE];
static PhysPageDesc **l1_phys_map;

#if !defined(CONFIG_USER_ONLY)
static void io_mem_init(void);

/* io memory support: per-io-index read/write handlers, one slot per
   access size (indexed 0..3) */
CPUWriteMemoryFunc *io_mem_write[IO_MEM_NB_ENTRIES][4];
CPUReadMemoryFunc *io_mem_read[IO_MEM_NB_ENTRIES][4];
void *io_mem_opaque[IO_MEM_NB_ENTRIES];
static char io_mem_used[IO_MEM_NB_ENTRIES];
static int io_mem_watch;
#endif

/* log support */
static const char *logfilename = "/tmp/qemu.log";
FILE *logfile;
int loglevel;
static int log_append = 0;

/* statistics */
static int tlb_flush_count;
static int tb_flush_count;
static int tb_phys_invalidate_count;

/* Sub-page (finer than TARGET_PAGE_SIZE) I/O dispatch table. */
#define SUBPAGE_IDX(addr) ((addr) & ~TARGET_PAGE_MASK)
typedef struct subpage_t {
    target_phys_addr_t base;
    CPUReadMemoryFunc **mem_read[TARGET_PAGE_SIZE][4];
    CPUWriteMemoryFunc **mem_write[TARGET_PAGE_SIZE][4];
    void *opaque[TARGET_PAGE_SIZE][2][4];
    ram_addr_t region_offset[TARGET_PAGE_SIZE][2][4];
} subpage_t;
205db7b5426Sblueswir1 
#ifdef _WIN32
/* Make [addr, addr+size) writable and executable (Windows). */
static void map_exec(void *addr, long size)
{
    DWORD old_protect;

    VirtualProtect(addr, size,
                   PAGE_EXECUTE_READWRITE, &old_protect);
}
#else
/* Make [addr, addr+size) writable and executable (POSIX).  The range is
   widened to whole host pages, as mprotect() requires page-aligned
   boundaries. */
static void map_exec(void *addr, long size)
{
    unsigned long page_mask, first, last;

    page_mask = getpagesize() - 1;
    first = (unsigned long)addr & ~page_mask;
    last = ((unsigned long)addr + size + page_mask) & ~page_mask;

    mprotect((void *)first, last - first,
             PROT_READ | PROT_WRITE | PROT_EXEC);
}
#endif
2317cb69caeSbellard 
232b346ff46Sbellard static void page_init(void)
23354936004Sbellard {
23483fb7adfSbellard     /* NOTE: we can always suppose that qemu_host_page_size >=
23554936004Sbellard        TARGET_PAGE_SIZE */
236c2b48b69Saliguori #ifdef _WIN32
237c2b48b69Saliguori     {
238c2b48b69Saliguori         SYSTEM_INFO system_info;
239c2b48b69Saliguori 
240c2b48b69Saliguori         GetSystemInfo(&system_info);
241c2b48b69Saliguori         qemu_real_host_page_size = system_info.dwPageSize;
242c2b48b69Saliguori     }
243c2b48b69Saliguori #else
244c2b48b69Saliguori     qemu_real_host_page_size = getpagesize();
245c2b48b69Saliguori #endif
24683fb7adfSbellard     if (qemu_host_page_size == 0)
24783fb7adfSbellard         qemu_host_page_size = qemu_real_host_page_size;
24883fb7adfSbellard     if (qemu_host_page_size < TARGET_PAGE_SIZE)
24983fb7adfSbellard         qemu_host_page_size = TARGET_PAGE_SIZE;
25083fb7adfSbellard     qemu_host_page_bits = 0;
25183fb7adfSbellard     while ((1 << qemu_host_page_bits) < qemu_host_page_size)
25283fb7adfSbellard         qemu_host_page_bits++;
25383fb7adfSbellard     qemu_host_page_mask = ~(qemu_host_page_size - 1);
254108c49b8Sbellard     l1_phys_map = qemu_vmalloc(L1_SIZE * sizeof(void *));
255108c49b8Sbellard     memset(l1_phys_map, 0, L1_SIZE * sizeof(void *));
25650a9569bSbalrog 
25750a9569bSbalrog #if !defined(_WIN32) && defined(CONFIG_USER_ONLY)
25850a9569bSbalrog     {
25950a9569bSbalrog         long long startaddr, endaddr;
26050a9569bSbalrog         FILE *f;
26150a9569bSbalrog         int n;
26250a9569bSbalrog 
263c8a706feSpbrook         mmap_lock();
2640776590dSpbrook         last_brk = (unsigned long)sbrk(0);
26550a9569bSbalrog         f = fopen("/proc/self/maps", "r");
26650a9569bSbalrog         if (f) {
26750a9569bSbalrog             do {
26850a9569bSbalrog                 n = fscanf (f, "%llx-%llx %*[^\n]\n", &startaddr, &endaddr);
26950a9569bSbalrog                 if (n == 2) {
270e0b8d65aSblueswir1                     startaddr = MIN(startaddr,
271e0b8d65aSblueswir1                                     (1ULL << TARGET_PHYS_ADDR_SPACE_BITS) - 1);
272e0b8d65aSblueswir1                     endaddr = MIN(endaddr,
273e0b8d65aSblueswir1                                     (1ULL << TARGET_PHYS_ADDR_SPACE_BITS) - 1);
274b5fc909eSpbrook                     page_set_flags(startaddr & TARGET_PAGE_MASK,
27550a9569bSbalrog                                    TARGET_PAGE_ALIGN(endaddr),
27650a9569bSbalrog                                    PAGE_RESERVED);
27750a9569bSbalrog                 }
27850a9569bSbalrog             } while (!feof(f));
27950a9569bSbalrog             fclose(f);
28050a9569bSbalrog         }
281c8a706feSpbrook         mmap_unlock();
28250a9569bSbalrog     }
28350a9569bSbalrog #endif
28454936004Sbellard }
28554936004Sbellard 
286434929bfSaliguori static inline PageDesc **page_l1_map(target_ulong index)
28754936004Sbellard {
28817e2377aSpbrook #if TARGET_LONG_BITS > 32
28917e2377aSpbrook     /* Host memory outside guest VM.  For 32-bit targets we have already
29017e2377aSpbrook        excluded high addresses.  */
291d8173e0fSths     if (index > ((target_ulong)L2_SIZE * L1_SIZE))
29217e2377aSpbrook         return NULL;
29317e2377aSpbrook #endif
294434929bfSaliguori     return &l1_map[index >> L2_BITS];
295434929bfSaliguori }
296434929bfSaliguori 
297434929bfSaliguori static inline PageDesc *page_find_alloc(target_ulong index)
298434929bfSaliguori {
299434929bfSaliguori     PageDesc **lp, *p;
300434929bfSaliguori     lp = page_l1_map(index);
301434929bfSaliguori     if (!lp)
302434929bfSaliguori         return NULL;
303434929bfSaliguori 
30454936004Sbellard     p = *lp;
30554936004Sbellard     if (!p) {
30654936004Sbellard         /* allocate if not found */
30717e2377aSpbrook #if defined(CONFIG_USER_ONLY)
30817e2377aSpbrook         size_t len = sizeof(PageDesc) * L2_SIZE;
30917e2377aSpbrook         /* Don't use qemu_malloc because it may recurse.  */
31017e2377aSpbrook         p = mmap(0, len, PROT_READ | PROT_WRITE,
31117e2377aSpbrook                  MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
31254936004Sbellard         *lp = p;
313fb1c2cd7Saurel32         if (h2g_valid(p)) {
314fb1c2cd7Saurel32             unsigned long addr = h2g(p);
31517e2377aSpbrook             page_set_flags(addr & TARGET_PAGE_MASK,
31617e2377aSpbrook                            TARGET_PAGE_ALIGN(addr + len),
31717e2377aSpbrook                            PAGE_RESERVED);
31817e2377aSpbrook         }
31917e2377aSpbrook #else
32017e2377aSpbrook         p = qemu_mallocz(sizeof(PageDesc) * L2_SIZE);
32117e2377aSpbrook         *lp = p;
32217e2377aSpbrook #endif
32354936004Sbellard     }
32454936004Sbellard     return p + (index & (L2_SIZE - 1));
32554936004Sbellard }
32654936004Sbellard 
32700f82b8aSaurel32 static inline PageDesc *page_find(target_ulong index)
32854936004Sbellard {
329434929bfSaliguori     PageDesc **lp, *p;
330434929bfSaliguori     lp = page_l1_map(index);
331434929bfSaliguori     if (!lp)
332434929bfSaliguori         return NULL;
33354936004Sbellard 
334434929bfSaliguori     p = *lp;
33554936004Sbellard     if (!p)
33654936004Sbellard         return 0;
337fd6ce8f6Sbellard     return p + (index & (L2_SIZE - 1));
33854936004Sbellard }
33954936004Sbellard 
/* Walk (and, when 'alloc' is non-zero, build) the physical page table
   and return the PhysPageDesc for physical page number 'index'.
   Returns NULL on a miss when 'alloc' is 0. */
static PhysPageDesc *phys_page_find_alloc(target_phys_addr_t index, int alloc)
{
    void **lp, **p;
    PhysPageDesc *pd;

    p = (void **)l1_phys_map;
#if TARGET_PHYS_ADDR_SPACE_BITS > 32

#if TARGET_PHYS_ADDR_SPACE_BITS > (32 + L1_BITS)
#error unsupported TARGET_PHYS_ADDR_SPACE_BITS
#endif
    /* extra top level for >32-bit physical address spaces */
    lp = p + ((index >> (L1_BITS + L2_BITS)) & (L1_SIZE - 1));
    p = *lp;
    if (!p) {
        /* allocate if not found */
        if (!alloc)
            return NULL;
        p = qemu_vmalloc(sizeof(void *) * L1_SIZE);
        memset(p, 0, sizeof(void *) * L1_SIZE);
        *lp = p;
    }
#endif
    lp = p + ((index >> L2_BITS) & (L1_SIZE - 1));
    pd = *lp;
    if (!pd) {
        int i;
        /* allocate if not found */
        if (!alloc)
            return NULL;
        pd = qemu_vmalloc(sizeof(PhysPageDesc) * L2_SIZE);
        *lp = pd;
        /* new leaves start unassigned, with region_offset defaulting to
           the page's own physical address */
        for (i = 0; i < L2_SIZE; i++) {
          pd[i].phys_offset = IO_MEM_UNASSIGNED;
          pd[i].region_offset = (index + i) << TARGET_PAGE_BITS;
        }
    }
    return ((PhysPageDesc *)pd) + (index & (L2_SIZE - 1));
}
37892e873b9Sbellard 
379108c49b8Sbellard static inline PhysPageDesc *phys_page_find(target_phys_addr_t index)
38092e873b9Sbellard {
381108c49b8Sbellard     return phys_page_find_alloc(index, 0);
38292e873b9Sbellard }
38392e873b9Sbellard 
#if !defined(CONFIG_USER_ONLY)
static void tlb_protect_code(ram_addr_t ram_addr);
static void tlb_unprotect_code_phys(CPUState *env, ram_addr_t ram_addr,
                                    target_ulong vaddr);
/* no mmap emulation in system mode: these are no-ops */
#define mmap_lock() do { } while(0)
#define mmap_unlock() do { } while(0)
#endif

#define DEFAULT_CODE_GEN_BUFFER_SIZE (32 * 1024 * 1024)

#if defined(CONFIG_USER_ONLY)
/* Currently it is not recommended to allocate big chunks of data in
   user mode. It will change when a dedicated libc will be used */
#define USE_STATIC_CODE_GEN_BUFFER
#endif

#ifdef USE_STATIC_CODE_GEN_BUFFER
static uint8_t static_code_gen_buffer[DEFAULT_CODE_GEN_BUFFER_SIZE];
#endif
4034369415fSbellard 
/* Allocate (or adopt) the translated-code buffer, make it executable,
   and size the TranslationBlock array from it.  Placement constraints
   differ per host OS/CPU, hence the #ifdef ladder below. */
static void code_gen_alloc(unsigned long tb_size)
{
#ifdef USE_STATIC_CODE_GEN_BUFFER
    code_gen_buffer = static_code_gen_buffer;
    code_gen_buffer_size = DEFAULT_CODE_GEN_BUFFER_SIZE;
    map_exec(code_gen_buffer, code_gen_buffer_size);
#else
    code_gen_buffer_size = tb_size;
    if (code_gen_buffer_size == 0) {
#if defined(CONFIG_USER_ONLY)
        /* in user mode, phys_ram_size is not meaningful */
        code_gen_buffer_size = DEFAULT_CODE_GEN_BUFFER_SIZE;
#else
        /* XXX: needs adjustments */
        code_gen_buffer_size = (unsigned long)(phys_ram_size / 4);
#endif
    }
    if (code_gen_buffer_size < MIN_CODE_GEN_BUFFER_SIZE)
        code_gen_buffer_size = MIN_CODE_GEN_BUFFER_SIZE;
    /* The code gen buffer location may have constraints depending on
       the host cpu and OS */
#if defined(__linux__)
    {
        int flags;
        void *start = NULL;

        flags = MAP_PRIVATE | MAP_ANONYMOUS;
#if defined(__x86_64__)
        /* keep the buffer in the low 2GB so generated jumps reach it */
        flags |= MAP_32BIT;
        /* Cannot map more than that */
        if (code_gen_buffer_size > (800 * 1024 * 1024))
            code_gen_buffer_size = (800 * 1024 * 1024);
#elif defined(__sparc_v9__)
        // Map the buffer below 2G, so we can use direct calls and branches
        flags |= MAP_FIXED;
        start = (void *) 0x60000000UL;
        if (code_gen_buffer_size > (512 * 1024 * 1024))
            code_gen_buffer_size = (512 * 1024 * 1024);
#elif defined(__arm__)
        /* Map the buffer below 32M, so we can use direct calls and branches */
        flags |= MAP_FIXED;
        start = (void *) 0x01000000UL;
        if (code_gen_buffer_size > 16 * 1024 * 1024)
            code_gen_buffer_size = 16 * 1024 * 1024;
#endif
        code_gen_buffer = mmap(start, code_gen_buffer_size,
                               PROT_WRITE | PROT_READ | PROT_EXEC,
                               flags, -1, 0);
        if (code_gen_buffer == MAP_FAILED) {
            fprintf(stderr, "Could not allocate dynamic translator buffer\n");
            exit(1);
        }
    }
#elif defined(__FreeBSD__) || defined(__DragonFly__)
    {
        int flags;
        void *addr = NULL;
        flags = MAP_PRIVATE | MAP_ANONYMOUS;
#if defined(__x86_64__)
        /* FreeBSD doesn't have MAP_32BIT, use MAP_FIXED and assume
         * 0x40000000 is free */
        flags |= MAP_FIXED;
        addr = (void *)0x40000000;
        /* Cannot map more than that */
        if (code_gen_buffer_size > (800 * 1024 * 1024))
            code_gen_buffer_size = (800 * 1024 * 1024);
#endif
        code_gen_buffer = mmap(addr, code_gen_buffer_size,
                               PROT_WRITE | PROT_READ | PROT_EXEC,
                               flags, -1, 0);
        if (code_gen_buffer == MAP_FAILED) {
            fprintf(stderr, "Could not allocate dynamic translator buffer\n");
            exit(1);
        }
    }
#else
    code_gen_buffer = qemu_malloc(code_gen_buffer_size);
    map_exec(code_gen_buffer, code_gen_buffer_size);
#endif
#endif /* !USE_STATIC_CODE_GEN_BUFFER */
    map_exec(code_gen_prologue, sizeof(code_gen_prologue));
    /* reserve room for the largest possible TB at the end of the buffer */
    code_gen_buffer_max_size = code_gen_buffer_size -
        code_gen_max_block_size();
    code_gen_max_blocks = code_gen_buffer_size / CODE_GEN_AVG_BLOCK_SIZE;
    tbs = qemu_malloc(code_gen_max_blocks * sizeof(TranslationBlock));
}
49026a5f13bSbellard 
/* Must be called before using the QEMU cpus. 'tb_size' is the size
   (in bytes) allocated to the translation buffer. Zero means default
   size. */
void cpu_exec_init_all(unsigned long tb_size)
{
    /* Order matters: the code buffer must exist before code_gen_ptr is
       seeded from it. */
    cpu_gen_init();
    code_gen_alloc(tb_size);
    code_gen_ptr = code_gen_buffer;
    page_init();
#if !defined(CONFIG_USER_ONLY)
    io_mem_init();
#endif
}
50426a5f13bSbellard 
#if defined(CPU_SAVE_VERSION) && !defined(CONFIG_USER_ONLY)

#define CPU_COMMON_SAVE_VERSION 1

/* savevm callback: write the target-independent CPU state. */
static void cpu_common_save(QEMUFile *f, void *opaque)
{
    CPUState *env = opaque;

    qemu_put_be32s(f, &env->halted);
    qemu_put_be32s(f, &env->interrupt_request);
}

/* loadvm callback: read the target-independent CPU state.
   Returns 0 on success, -EINVAL on version mismatch. */
static int cpu_common_load(QEMUFile *f, void *opaque, int version_id)
{
    CPUState *env = opaque;

    if (version_id != CPU_COMMON_SAVE_VERSION)
        return -EINVAL;

    qemu_get_be32s(f, &env->halted);
    qemu_get_be32s(f, &env->interrupt_request);
    /* restored state invalidates any cached translations in the TLB */
    tlb_flush(env, 1);

    return 0;
}
#endif
5319656f324Spbrook 
5326a00d601Sbellard void cpu_exec_init(CPUState *env)
533fd6ce8f6Sbellard {
5346a00d601Sbellard     CPUState **penv;
5356a00d601Sbellard     int cpu_index;
5366a00d601Sbellard 
537c2764719Spbrook #if defined(CONFIG_USER_ONLY)
538c2764719Spbrook     cpu_list_lock();
539c2764719Spbrook #endif
5406a00d601Sbellard     env->next_cpu = NULL;
5416a00d601Sbellard     penv = &first_cpu;
5426a00d601Sbellard     cpu_index = 0;
5436a00d601Sbellard     while (*penv != NULL) {
5446a00d601Sbellard         penv = (CPUState **)&(*penv)->next_cpu;
5456a00d601Sbellard         cpu_index++;
5466a00d601Sbellard     }
5476a00d601Sbellard     env->cpu_index = cpu_index;
548c0ce998eSaliguori     TAILQ_INIT(&env->breakpoints);
549c0ce998eSaliguori     TAILQ_INIT(&env->watchpoints);
5506a00d601Sbellard     *penv = env;
551c2764719Spbrook #if defined(CONFIG_USER_ONLY)
552c2764719Spbrook     cpu_list_unlock();
553c2764719Spbrook #endif
554b3c7724cSpbrook #if defined(CPU_SAVE_VERSION) && !defined(CONFIG_USER_ONLY)
5559656f324Spbrook     register_savevm("cpu_common", cpu_index, CPU_COMMON_SAVE_VERSION,
5569656f324Spbrook                     cpu_common_save, cpu_common_load, env);
557b3c7724cSpbrook     register_savevm("cpu", cpu_index, CPU_SAVE_VERSION,
558b3c7724cSpbrook                     cpu_save, cpu_load, env);
559b3c7724cSpbrook #endif
560fd6ce8f6Sbellard }
561fd6ce8f6Sbellard 
5629fa3e853Sbellard static inline void invalidate_page_bitmap(PageDesc *p)
5639fa3e853Sbellard {
5649fa3e853Sbellard     if (p->code_bitmap) {
56559817ccbSbellard         qemu_free(p->code_bitmap);
5669fa3e853Sbellard         p->code_bitmap = NULL;
5679fa3e853Sbellard     }
5689fa3e853Sbellard     p->code_write_count = 0;
5699fa3e853Sbellard }
5709fa3e853Sbellard 
571fd6ce8f6Sbellard /* set to NULL all the 'first_tb' fields in all PageDescs */
572fd6ce8f6Sbellard static void page_flush_tb(void)
573fd6ce8f6Sbellard {
574fd6ce8f6Sbellard     int i, j;
575fd6ce8f6Sbellard     PageDesc *p;
576fd6ce8f6Sbellard 
577fd6ce8f6Sbellard     for(i = 0; i < L1_SIZE; i++) {
578fd6ce8f6Sbellard         p = l1_map[i];
579fd6ce8f6Sbellard         if (p) {
5809fa3e853Sbellard             for(j = 0; j < L2_SIZE; j++) {
5819fa3e853Sbellard                 p->first_tb = NULL;
5829fa3e853Sbellard                 invalidate_page_bitmap(p);
5839fa3e853Sbellard                 p++;
5849fa3e853Sbellard             }
585fd6ce8f6Sbellard         }
586fd6ce8f6Sbellard     }
587fd6ce8f6Sbellard }
588fd6ce8f6Sbellard 
/* flush all the translation blocks */
/* XXX: tb_flush is currently not thread safe */
void tb_flush(CPUState *env1)
{
    CPUState *env;
#if defined(DEBUG_FLUSH)
    printf("qemu: flush code_size=%ld nb_tbs=%d avg_tb_size=%ld\n",
           (unsigned long)(code_gen_ptr - code_gen_buffer),
           nb_tbs, nb_tbs > 0 ?
           ((unsigned long)(code_gen_ptr - code_gen_buffer)) / nb_tbs : 0);
#endif
    /* sanity check: the generator must never write past the buffer */
    if ((unsigned long)(code_gen_ptr - code_gen_buffer) > code_gen_buffer_size)
        cpu_abort(env1, "Internal error: code buffer overflow\n");

    nb_tbs = 0;

    /* invalidate every CPU's virtual-PC -> TB lookup cache */
    for(env = first_cpu; env != NULL; env = env->next_cpu) {
        memset (env->tb_jmp_cache, 0, TB_JMP_CACHE_SIZE * sizeof (void *));
    }

    memset (tb_phys_hash, 0, CODE_GEN_PHYS_HASH_SIZE * sizeof (void *));
    page_flush_tb();

    /* all generated code is now reclaimable: restart from the beginning */
    code_gen_ptr = code_gen_buffer;
    /* XXX: flush processor icache at this point if cache flush is
       expensive */
    tb_flush_count++;
}
617fd6ce8f6Sbellard 
#ifdef DEBUG_TB_CHECK

/* Consistency check: report any TB in the physical hash table that
   still overlaps the (supposedly invalidated) page at 'address'. */
static void tb_invalidate_check(target_ulong address)
{
    TranslationBlock *tb;
    int i;
    address &= TARGET_PAGE_MASK;
    for(i = 0;i < CODE_GEN_PHYS_HASH_SIZE; i++) {
        for(tb = tb_phys_hash[i]; tb != NULL; tb = tb->phys_hash_next) {
            if (!(address + TARGET_PAGE_SIZE <= tb->pc ||
                  address >= tb->pc + tb->size)) {
                printf("ERROR invalidate: address=%08lx PC=%08lx size=%04x\n",
                       address, (long)tb->pc, tb->size);
            }
        }
    }
}

/* verify that all the pages have correct rights for code */
static void tb_page_check(void)
{
    TranslationBlock *tb;
    int i, flags1, flags2;

    for(i = 0;i < CODE_GEN_PHYS_HASH_SIZE; i++) {
        for(tb = tb_phys_hash[i]; tb != NULL; tb = tb->phys_hash_next) {
            flags1 = page_get_flags(tb->pc);
            flags2 = page_get_flags(tb->pc + tb->size - 1);
            if ((flags1 & PAGE_WRITE) || (flags2 & PAGE_WRITE)) {
                printf("ERROR page flags: PC=%08lx size=%04x f1=%x f2=%x\n",
                       (long)tb->pc, tb->size, flags1, flags2);
            }
        }
    }
}

/* Consistency check: walk tb's circular jump list (low two pointer
   bits encode the jump slot; tag 2 marks the list end) and verify it
   terminates back at tb itself. */
static void tb_jmp_check(TranslationBlock *tb)
{
    TranslationBlock *tb1;
    unsigned int n1;

    /* follow the jump list to its end marker */
    tb1 = tb->jmp_first;
    for(;;) {
        n1 = (long)tb1 & 3;
        tb1 = (TranslationBlock *)((long)tb1 & ~3);
        if (n1 == 2)
            break;
        tb1 = tb1->jmp_next[n1];
    }
    /* check end of list */
    if (tb1 != tb) {
        printf("ERROR: jmp_list from 0x%08lx\n", (long)tb);
    }
}

#endif
675fd6ce8f6Sbellard 
676fd6ce8f6Sbellard /* invalidate one TB */
677fd6ce8f6Sbellard static inline void tb_remove(TranslationBlock **ptb, TranslationBlock *tb,
678fd6ce8f6Sbellard                              int next_offset)
679fd6ce8f6Sbellard {
680fd6ce8f6Sbellard     TranslationBlock *tb1;
681fd6ce8f6Sbellard     for(;;) {
682fd6ce8f6Sbellard         tb1 = *ptb;
683fd6ce8f6Sbellard         if (tb1 == tb) {
684fd6ce8f6Sbellard             *ptb = *(TranslationBlock **)((char *)tb1 + next_offset);
685fd6ce8f6Sbellard             break;
686fd6ce8f6Sbellard         }
687fd6ce8f6Sbellard         ptb = (TranslationBlock **)((char *)tb1 + next_offset);
688fd6ce8f6Sbellard     }
689fd6ce8f6Sbellard }
690fd6ce8f6Sbellard 
6919fa3e853Sbellard static inline void tb_page_remove(TranslationBlock **ptb, TranslationBlock *tb)
6929fa3e853Sbellard {
6939fa3e853Sbellard     TranslationBlock *tb1;
6949fa3e853Sbellard     unsigned int n1;
6959fa3e853Sbellard 
6969fa3e853Sbellard     for(;;) {
6979fa3e853Sbellard         tb1 = *ptb;
6989fa3e853Sbellard         n1 = (long)tb1 & 3;
6999fa3e853Sbellard         tb1 = (TranslationBlock *)((long)tb1 & ~3);
7009fa3e853Sbellard         if (tb1 == tb) {
7019fa3e853Sbellard             *ptb = tb1->page_next[n1];
7029fa3e853Sbellard             break;
7039fa3e853Sbellard         }
7049fa3e853Sbellard         ptb = &tb1->page_next[n1];
7059fa3e853Sbellard     }
7069fa3e853Sbellard }
7079fa3e853Sbellard 
708d4e8164fSbellard static inline void tb_jmp_remove(TranslationBlock *tb, int n)
709d4e8164fSbellard {
710d4e8164fSbellard     TranslationBlock *tb1, **ptb;
711d4e8164fSbellard     unsigned int n1;
712d4e8164fSbellard 
713d4e8164fSbellard     ptb = &tb->jmp_next[n];
714d4e8164fSbellard     tb1 = *ptb;
715d4e8164fSbellard     if (tb1) {
716d4e8164fSbellard         /* find tb(n) in circular list */
717d4e8164fSbellard         for(;;) {
718d4e8164fSbellard             tb1 = *ptb;
719d4e8164fSbellard             n1 = (long)tb1 & 3;
720d4e8164fSbellard             tb1 = (TranslationBlock *)((long)tb1 & ~3);
721d4e8164fSbellard             if (n1 == n && tb1 == tb)
722d4e8164fSbellard                 break;
723d4e8164fSbellard             if (n1 == 2) {
724d4e8164fSbellard                 ptb = &tb1->jmp_first;
725d4e8164fSbellard             } else {
726d4e8164fSbellard                 ptb = &tb1->jmp_next[n1];
727d4e8164fSbellard             }
728d4e8164fSbellard         }
729d4e8164fSbellard         /* now we can suppress tb(n) from the list */
730d4e8164fSbellard         *ptb = tb->jmp_next[n];
731d4e8164fSbellard 
732d4e8164fSbellard         tb->jmp_next[n] = NULL;
733d4e8164fSbellard     }
734d4e8164fSbellard }
735d4e8164fSbellard 
/* reset the jump entry 'n' of a TB so that it is not chained to
   another TB */
static inline void tb_reset_jump(TranslationBlock *tb, int n)
{
    /* re-point the patched branch at the address stored in
       tb_next_offset[n], i.e. back into this TB's own epilogue so
       execution returns to the main loop instead of a chained TB */
    tb_set_jmp_target(tb, n, (unsigned long)(tb->tc_ptr + tb->tb_next_offset[n]));
}
742d4e8164fSbellard 
/* Invalidate a single TB: remove it from every structure that can
   reach it (physical hash table, per-page TB lists, per-CPU jump
   caches, jump chains).  'page_addr' is the page whose list the caller
   is already iterating (pass -1 to unlink from both pages). */
void tb_phys_invalidate(TranslationBlock *tb, target_ulong page_addr)
{
    CPUState *env;
    PageDesc *p;
    unsigned int h, n1;
    target_phys_addr_t phys_pc;
    TranslationBlock *tb1, *tb2;

    /* remove the TB from the physical-PC hash list */
    phys_pc = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
    h = tb_phys_hash_func(phys_pc);
    tb_remove(&tb_phys_hash[h], tb,
              offsetof(TranslationBlock, phys_hash_next));

    /* remove the TB from the page list(s); the page equal to
       'page_addr' is skipped because the caller owns its iteration */
    if (tb->page_addr[0] != page_addr) {
        p = page_find(tb->page_addr[0] >> TARGET_PAGE_BITS);
        tb_page_remove(&p->first_tb, tb);
        invalidate_page_bitmap(p);
    }
    if (tb->page_addr[1] != -1 && tb->page_addr[1] != page_addr) {
        p = page_find(tb->page_addr[1] >> TARGET_PAGE_BITS);
        tb_page_remove(&p->first_tb, tb);
        invalidate_page_bitmap(p);
    }

    tb_invalidated_flag = 1;

    /* remove the TB from each CPU's virtual-PC jump cache */
    h = tb_jmp_cache_hash_func(tb->pc);
    for(env = first_cpu; env != NULL; env = env->next_cpu) {
        if (env->tb_jmp_cache[h] == tb)
            env->tb_jmp_cache[h] = NULL;
    }

    /* suppress this TB from the two jump lists */
    tb_jmp_remove(tb, 0);
    tb_jmp_remove(tb, 1);

    /* suppress any remaining jumps to this TB: reset each incoming
       jump (tagged-pointer circular list, tag 2 terminates) */
    tb1 = tb->jmp_first;
    for(;;) {
        n1 = (long)tb1 & 3;
        if (n1 == 2)
            break;
        tb1 = (TranslationBlock *)((long)tb1 & ~3);
        tb2 = tb1->jmp_next[n1];
        tb_reset_jump(tb1, n1);
        tb1->jmp_next[n1] = NULL;
        tb1 = tb2;
    }
    tb->jmp_first = (TranslationBlock *)((long)tb | 2); /* fail safe */

    tb_phys_invalidate_count++;
}
7989fa3e853Sbellard 
/* set bits [start, start + len) in the bitmap 'tab' (bit 0 of each
   byte is the lowest-numbered bit).  len == 0 is a no-op. */
static inline void set_bits(uint8_t *tab, int start, int len)
{
    int end = start + len;
    uint8_t *p = tab + (start >> 3);
    int mask;

    if ((start & ~7) == (end & ~7)) {
        /* the whole range lives inside a single byte */
        if (start < end) {
            mask = (0xff << (start & 7)) & ~(0xff << (end & 7));
            *p |= mask;
        }
    } else {
        int full_end = end & ~7;

        /* head: from 'start' up to the next byte boundary */
        *p++ |= 0xff << (start & 7);
        start = (start + 8) & ~7;
        /* middle: whole bytes */
        while (start < full_end) {
            *p++ = 0xff;
            start += 8;
        }
        /* tail: remaining bits below 'end', if any */
        if (start < end) {
            *p |= ~(0xff << (end & 7));
        }
    }
}
8259fa3e853Sbellard 
/* Build the SMC code bitmap of page 'p': one bit per byte of the page,
   set where some TB's translated guest code lies.  Used by
   tb_invalidate_phys_page_fast() to skip writes that miss all code. */
static void build_page_bitmap(PageDesc *p)
{
    int n, tb_start, tb_end;
    TranslationBlock *tb;

    /* TARGET_PAGE_SIZE bits, zero-initialized */
    p->code_bitmap = qemu_mallocz(TARGET_PAGE_SIZE / 8);

    tb = p->first_tb;
    while (tb != NULL) {
        /* low 2 bits of the list pointer encode which of the TB's (up
           to two) pages this list belongs to */
        n = (long)tb & 3;
        tb = (TranslationBlock *)((long)tb & ~3);
        /* NOTE: this is subtle as a TB may span two physical pages */
        if (n == 0) {
            /* NOTE: tb_end may be after the end of the page, but
               it is not a problem */
            tb_start = tb->pc & ~TARGET_PAGE_MASK;
            tb_end = tb_start + tb->size;
            if (tb_end > TARGET_PAGE_SIZE)
                tb_end = TARGET_PAGE_SIZE;
        } else {
            /* second page: the TB's code starts at offset 0 here */
            tb_start = 0;
            tb_end = ((tb->pc + tb->size) & ~TARGET_PAGE_MASK);
        }
        set_bits(p->code_bitmap, tb_start, tb_end - tb_start);
        tb = tb->page_next[n];
    }
}
8539fa3e853Sbellard 
/* Translate a new TB for (pc, cs_base, flags) with translation flags
   'cflags', link it into the physical page tables and return it.
   Flushes the whole TB cache (and sets tb_invalidated_flag) if
   allocation fails the first time. */
TranslationBlock *tb_gen_code(CPUState *env,
                              target_ulong pc, target_ulong cs_base,
                              int flags, int cflags)
{
    TranslationBlock *tb;
    uint8_t *tc_ptr;
    target_ulong phys_pc, phys_page2, virt_page2;
    int code_gen_size;

    phys_pc = get_phys_addr_code(env, pc);
    tb = tb_alloc(pc);
    if (!tb) {
        /* flush must be done */
        tb_flush(env);
        /* cannot fail at this point */
        tb = tb_alloc(pc);
        /* Don't forget to invalidate previous TB info.  */
        tb_invalidated_flag = 1;
    }
    /* emit host code at the current end of the code buffer */
    tc_ptr = code_gen_ptr;
    tb->tc_ptr = tc_ptr;
    tb->cs_base = cs_base;
    tb->flags = flags;
    tb->cflags = cflags;
    cpu_gen_code(env, tb, &code_gen_size);
    /* round the buffer pointer up to the next CODE_GEN_ALIGN boundary */
    code_gen_ptr = (void *)(((unsigned long)code_gen_ptr + code_gen_size + CODE_GEN_ALIGN - 1) & ~(CODE_GEN_ALIGN - 1));

    /* check next page if needed: a TB may span two guest pages */
    virt_page2 = (pc + tb->size - 1) & TARGET_PAGE_MASK;
    phys_page2 = -1;
    if ((pc & TARGET_PAGE_MASK) != virt_page2) {
        phys_page2 = get_phys_addr_code(env, virt_page2);
    }
    tb_link_phys(tb, phys_pc, phys_page2);
    return tb;
}
890d720b93dSbellard 
/* invalidate all TBs which intersect with the target physical page
   starting in range [start;end[. NOTE: start and end must refer to
   the same physical page. 'is_cpu_write_access' should be true if called
   from a real cpu write access: the virtual CPU will exit the current
   TB if code is modified inside this TB. */
void tb_invalidate_phys_page_range(target_phys_addr_t start, target_phys_addr_t end,
                                   int is_cpu_write_access)
{
    TranslationBlock *tb, *tb_next, *saved_tb;
    CPUState *env = cpu_single_env;
    target_ulong tb_start, tb_end;
    PageDesc *p;
    int n;
#ifdef TARGET_HAS_PRECISE_SMC
    /* state for precise self-modifying-code handling: if the write
       comes from the currently executing TB we must retranslate it */
    int current_tb_not_found = is_cpu_write_access;
    TranslationBlock *current_tb = NULL;
    int current_tb_modified = 0;
    target_ulong current_pc = 0;
    target_ulong current_cs_base = 0;
    int current_flags = 0;
#endif /* TARGET_HAS_PRECISE_SMC */

    p = page_find(start >> TARGET_PAGE_BITS);
    if (!p)
        return;
    /* after enough code writes to this page, build the code bitmap so
       future writes can be filtered in tb_invalidate_phys_page_fast() */
    if (!p->code_bitmap &&
        ++p->code_write_count >= SMC_BITMAP_USE_THRESHOLD &&
        is_cpu_write_access) {
        /* build code bitmap */
        build_page_bitmap(p);
    }

    /* we remove all the TBs in the range [start, end[ */
    /* XXX: see if in some cases it could be faster to invalidate all the code */
    tb = p->first_tb;
    while (tb != NULL) {
        /* low 2 bits of the list pointer select which of the TB's
           pages this list entry belongs to */
        n = (long)tb & 3;
        tb = (TranslationBlock *)((long)tb & ~3);
        tb_next = tb->page_next[n];
        /* NOTE: this is subtle as a TB may span two physical pages */
        if (n == 0) {
            /* NOTE: tb_end may be after the end of the page, but
               it is not a problem */
            tb_start = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
            tb_end = tb_start + tb->size;
        } else {
            tb_start = tb->page_addr[1];
            tb_end = tb_start + ((tb->pc + tb->size) & ~TARGET_PAGE_MASK);
        }
        if (!(tb_end <= start || tb_start >= end)) {
#ifdef TARGET_HAS_PRECISE_SMC
            if (current_tb_not_found) {
                current_tb_not_found = 0;
                current_tb = NULL;
                if (env->mem_io_pc) {
                    /* now we have a real cpu fault */
                    current_tb = tb_find_pc(env->mem_io_pc);
                }
            }
            if (current_tb == tb &&
                (current_tb->cflags & CF_COUNT_MASK) != 1) {
                /* If we are modifying the current TB, we must stop
                its execution. We could be more precise by checking
                that the modification is after the current PC, but it
                would require a specialized function to partially
                restore the CPU state */

                current_tb_modified = 1;
                cpu_restore_state(current_tb, env,
                                  env->mem_io_pc, NULL);
                cpu_get_tb_cpu_state(env, &current_pc, &current_cs_base,
                                     &current_flags);
            }
#endif /* TARGET_HAS_PRECISE_SMC */
            /* we need to do that to handle the case where a signal
               occurs while doing tb_phys_invalidate() */
            saved_tb = NULL;
            if (env) {
                saved_tb = env->current_tb;
                env->current_tb = NULL;
            }
            tb_phys_invalidate(tb, -1);
            if (env) {
                env->current_tb = saved_tb;
                /* re-deliver any interrupt that arrived while
                   current_tb was cleared */
                if (env->interrupt_request && env->current_tb)
                    cpu_interrupt(env, env->interrupt_request);
            }
        }
        tb = tb_next;
    }
#if !defined(CONFIG_USER_ONLY)
    /* if no code remaining, no need to continue to use slow writes */
    if (!p->first_tb) {
        invalidate_page_bitmap(p);
        if (is_cpu_write_access) {
            tlb_unprotect_code_phys(env, start, env->mem_io_vaddr);
        }
    }
#endif
#ifdef TARGET_HAS_PRECISE_SMC
    if (current_tb_modified) {
        /* we generate a block containing just the instruction
           modifying the memory. It will ensure that it cannot modify
           itself */
        env->current_tb = NULL;
        tb_gen_code(env, current_pc, current_cs_base, current_flags, 1);
        cpu_resume_from_signal(env, NULL);
    }
#endif
}
10019fa3e853Sbellard 
/* len must be <= 8 and start must be a multiple of len.
   Fast path for small code-area writes: consult the page's code
   bitmap and only fall back to the full range invalidation when the
   written bytes actually overlap translated code. */
static inline void tb_invalidate_phys_page_fast(target_phys_addr_t start, int len)
{
    PageDesc *p;
    int offset, b;
#if 0
    if (1) {
        qemu_log("modifying code at 0x%x size=%d EIP=%x PC=%08x\n",
                  cpu_single_env->mem_io_vaddr, len,
                  cpu_single_env->eip,
                  cpu_single_env->eip + (long)cpu_single_env->segs[R_CS].base);
    }
#endif
    p = page_find(start >> TARGET_PAGE_BITS);
    if (!p)
        return;
    if (p->code_bitmap) {
        /* test the 'len' bits starting at the write offset */
        offset = start & ~TARGET_PAGE_MASK;
        b = p->code_bitmap[offset >> 3] >> (offset & 7);
        if (b & ((1 << len) - 1))
            goto do_invalidate;
    } else {
        /* no bitmap yet: must assume the write may hit code */
    do_invalidate:
        tb_invalidate_phys_page_range(start, start + len, 1);
    }
}
10289fa3e853Sbellard 
10299fa3e853Sbellard #if !defined(CONFIG_SOFTMMU)
/* User-mode variant: invalidate every TB on the page containing
   'addr' (a write fault was taken on it).  'pc'/'puc' identify the
   faulting host context for precise SMC handling. */
static void tb_invalidate_phys_page(target_phys_addr_t addr,
                                    unsigned long pc, void *puc)
{
    TranslationBlock *tb;
    PageDesc *p;
    int n;
#ifdef TARGET_HAS_PRECISE_SMC
    TranslationBlock *current_tb = NULL;
    CPUState *env = cpu_single_env;
    int current_tb_modified = 0;
    target_ulong current_pc = 0;
    target_ulong current_cs_base = 0;
    int current_flags = 0;
#endif

    addr &= TARGET_PAGE_MASK;
    p = page_find(addr >> TARGET_PAGE_BITS);
    if (!p)
        return;
    tb = p->first_tb;
#ifdef TARGET_HAS_PRECISE_SMC
    if (tb && pc != 0) {
        /* locate the TB the CPU was executing when it faulted */
        current_tb = tb_find_pc(pc);
    }
#endif
    while (tb != NULL) {
        /* list pointers are tagged with the page index in the low bits */
        n = (long)tb & 3;
        tb = (TranslationBlock *)((long)tb & ~3);
#ifdef TARGET_HAS_PRECISE_SMC
        if (current_tb == tb &&
            (current_tb->cflags & CF_COUNT_MASK) != 1) {
                /* If we are modifying the current TB, we must stop
                   its execution. We could be more precise by checking
                   that the modification is after the current PC, but it
                   would require a specialized function to partially
                   restore the CPU state */

            current_tb_modified = 1;
            cpu_restore_state(current_tb, env, pc, puc);
            cpu_get_tb_cpu_state(env, &current_pc, &current_cs_base,
                                 &current_flags);
        }
#endif /* TARGET_HAS_PRECISE_SMC */
        tb_phys_invalidate(tb, addr);
        tb = tb->page_next[n];
    }
    p->first_tb = NULL;
#ifdef TARGET_HAS_PRECISE_SMC
    if (current_tb_modified) {
        /* we generate a block containing just the instruction
           modifying the memory. It will ensure that it cannot modify
           itself */
        env->current_tb = NULL;
        tb_gen_code(env, current_pc, current_cs_base, current_flags, 1);
        cpu_resume_from_signal(env, puc);
    }
#endif
}
10889fa3e853Sbellard #endif
1089fd6ce8f6Sbellard 
/* add the tb in the target page and protect it if necessary.
   'n' is the page index (0 or 1) inside the TB; it is encoded into
   the low bits of the page's list pointer. */
static inline void tb_alloc_page(TranslationBlock *tb,
                                 unsigned int n, target_ulong page_addr)
{
    PageDesc *p;
    TranslationBlock *last_first_tb;

    tb->page_addr[n] = page_addr;
    p = page_find_alloc(page_addr >> TARGET_PAGE_BITS);
    tb->page_next[n] = p->first_tb;
    /* remember whether the page already held code before this TB */
    last_first_tb = p->first_tb;
    p->first_tb = (TranslationBlock *)((long)tb | n);
    invalidate_page_bitmap(p);

#if defined(TARGET_HAS_SMC) || 1

#if defined(CONFIG_USER_ONLY)
    if (p->flags & PAGE_WRITE) {
        target_ulong addr;
        PageDesc *p2;
        int prot;

        /* force the host page as non writable (writes will have a
           page fault + mprotect overhead) */
        page_addr &= qemu_host_page_mask;
        prot = 0;
        /* a host page may cover several target pages: accumulate
           their flags and strip PAGE_WRITE from each of them */
        for(addr = page_addr; addr < page_addr + qemu_host_page_size;
            addr += TARGET_PAGE_SIZE) {

            p2 = page_find (addr >> TARGET_PAGE_BITS);
            if (!p2)
                continue;
            prot |= p2->flags;
            p2->flags &= ~PAGE_WRITE;
            page_get_flags(addr);
          }
        mprotect(g2h(page_addr), qemu_host_page_size,
                 (prot & PAGE_BITS) & ~PAGE_WRITE);
#ifdef DEBUG_TB_INVALIDATE
        printf("protecting code page: 0x" TARGET_FMT_lx "\n",
               page_addr);
#endif
    }
#else
    /* if some code is already present, then the pages are already
       protected. So we handle the case where only the first TB is
       allocated in a physical page */
    if (!last_first_tb) {
        tlb_protect_code(page_addr);
    }
#endif

#endif /* TARGET_HAS_SMC */
}
1144fd6ce8f6Sbellard 
1145fd6ce8f6Sbellard /* Allocate a new translation block. Flush the translation buffer if
1146fd6ce8f6Sbellard    too many translation blocks or too much generated code. */
1147c27004ecSbellard TranslationBlock *tb_alloc(target_ulong pc)
1148fd6ce8f6Sbellard {
1149fd6ce8f6Sbellard     TranslationBlock *tb;
1150fd6ce8f6Sbellard 
115126a5f13bSbellard     if (nb_tbs >= code_gen_max_blocks ||
115226a5f13bSbellard         (code_gen_ptr - code_gen_buffer) >= code_gen_buffer_max_size)
1153d4e8164fSbellard         return NULL;
1154fd6ce8f6Sbellard     tb = &tbs[nb_tbs++];
1155fd6ce8f6Sbellard     tb->pc = pc;
1156b448f2f3Sbellard     tb->cflags = 0;
1157d4e8164fSbellard     return tb;
1158d4e8164fSbellard }
1159d4e8164fSbellard 
11602e70f6efSpbrook void tb_free(TranslationBlock *tb)
11612e70f6efSpbrook {
1162bf20dc07Sths     /* In practice this is mostly used for single use temporary TB
11632e70f6efSpbrook        Ignore the hard cases and just back up if this TB happens to
11642e70f6efSpbrook        be the last one generated.  */
11652e70f6efSpbrook     if (nb_tbs > 0 && tb == &tbs[nb_tbs - 1]) {
11662e70f6efSpbrook         code_gen_ptr = tb->tc_ptr;
11672e70f6efSpbrook         nb_tbs--;
11682e70f6efSpbrook     }
11692e70f6efSpbrook }
11702e70f6efSpbrook 
/* add a new TB and link it to the physical page tables. phys_page2 is
   (-1) to indicate that only one page contains the TB. */
void tb_link_phys(TranslationBlock *tb,
                  target_ulong phys_pc, target_ulong phys_page2)
{
    unsigned int h;
    TranslationBlock **ptb;

    /* Grab the mmap lock to stop another thread invalidating this TB
       before we are done.  */
    mmap_lock();
    /* add in the physical hash table (push onto the bucket's list) */
    h = tb_phys_hash_func(phys_pc);
    ptb = &tb_phys_hash[h];
    tb->phys_hash_next = *ptb;
    *ptb = tb;

    /* add in the page list */
    tb_alloc_page(tb, 0, phys_pc & TARGET_PAGE_MASK);
    if (phys_page2 != -1)
        tb_alloc_page(tb, 1, phys_page2);
    else
        tb->page_addr[1] = -1;

    /* initialize the jump list: empty, tag 2 marks the terminator */
    tb->jmp_first = (TranslationBlock *)((long)tb | 2);
    tb->jmp_next[0] = NULL;
    tb->jmp_next[1] = NULL;

    /* init original jump addresses (0xffff means "no jump slot") */
    if (tb->tb_next_offset[0] != 0xffff)
        tb_reset_jump(tb, 0);
    if (tb->tb_next_offset[1] != 0xffff)
        tb_reset_jump(tb, 1);

#ifdef DEBUG_TB_CHECK
    tb_page_check();
#endif
    mmap_unlock();
}
1210fd6ce8f6Sbellard 
1211a513fe19Sbellard /* find the TB 'tb' such that tb[0].tc_ptr <= tc_ptr <
1212a513fe19Sbellard    tb[1].tc_ptr. Return NULL if not found */
1213a513fe19Sbellard TranslationBlock *tb_find_pc(unsigned long tc_ptr)
1214a513fe19Sbellard {
1215a513fe19Sbellard     int m_min, m_max, m;
1216a513fe19Sbellard     unsigned long v;
1217a513fe19Sbellard     TranslationBlock *tb;
1218a513fe19Sbellard 
1219a513fe19Sbellard     if (nb_tbs <= 0)
1220a513fe19Sbellard         return NULL;
1221a513fe19Sbellard     if (tc_ptr < (unsigned long)code_gen_buffer ||
1222a513fe19Sbellard         tc_ptr >= (unsigned long)code_gen_ptr)
1223a513fe19Sbellard         return NULL;
1224a513fe19Sbellard     /* binary search (cf Knuth) */
1225a513fe19Sbellard     m_min = 0;
1226a513fe19Sbellard     m_max = nb_tbs - 1;
1227a513fe19Sbellard     while (m_min <= m_max) {
1228a513fe19Sbellard         m = (m_min + m_max) >> 1;
1229a513fe19Sbellard         tb = &tbs[m];
1230a513fe19Sbellard         v = (unsigned long)tb->tc_ptr;
1231a513fe19Sbellard         if (v == tc_ptr)
1232a513fe19Sbellard             return tb;
1233a513fe19Sbellard         else if (tc_ptr < v) {
1234a513fe19Sbellard             m_max = m - 1;
1235a513fe19Sbellard         } else {
1236a513fe19Sbellard             m_min = m + 1;
1237a513fe19Sbellard         }
1238a513fe19Sbellard     }
1239a513fe19Sbellard     return &tbs[m_max];
1240a513fe19Sbellard }
12417501267eSbellard 
1242ea041c0eSbellard static void tb_reset_jump_recursive(TranslationBlock *tb);
1243ea041c0eSbellard 
/* Undo the chaining of jump slot 'n' of 'tb': find the TB it jumps to,
   remove 'tb' from that TB's incoming-jump list, reset the patched
   branch, then recurse into the target so its own chains are broken
   too. */
static inline void tb_reset_jump_recursive2(TranslationBlock *tb, int n)
{
    TranslationBlock *tb1, *tb_next, **ptb;
    unsigned int n1;

    tb1 = tb->jmp_next[n];
    if (tb1 != NULL) {
        /* find head of list: follow tagged pointers (low 2 bits hold
           the jump-slot index; tag 2 marks the list head) */
        for(;;) {
            n1 = (long)tb1 & 3;
            tb1 = (TranslationBlock *)((long)tb1 & ~3);
            if (n1 == 2)
                break;
            tb1 = tb1->jmp_next[n1];
        }
        /* we are now sure now that tb jumps to tb1 */
        tb_next = tb1;

        /* remove tb from the jmp_first list */
        ptb = &tb_next->jmp_first;
        for(;;) {
            tb1 = *ptb;
            n1 = (long)tb1 & 3;
            tb1 = (TranslationBlock *)((long)tb1 & ~3);
            if (n1 == n && tb1 == tb)
                break;
            ptb = &tb1->jmp_next[n1];
        }
        *ptb = tb->jmp_next[n];
        tb->jmp_next[n] = NULL;

        /* suppress the jump to next tb in generated code */
        tb_reset_jump(tb, n);

        /* suppress jumps in the tb on which we could have jumped */
        tb_reset_jump_recursive(tb_next);
    }
}
1282ea041c0eSbellard 
1283ea041c0eSbellard static void tb_reset_jump_recursive(TranslationBlock *tb)
1284ea041c0eSbellard {
1285ea041c0eSbellard     tb_reset_jump_recursive2(tb, 0);
1286ea041c0eSbellard     tb_reset_jump_recursive2(tb, 1);
1287ea041c0eSbellard }
1288ea041c0eSbellard 
12891fddef4bSbellard #if defined(TARGET_HAS_ICE)
1290d720b93dSbellard static void breakpoint_invalidate(CPUState *env, target_ulong pc)
1291d720b93dSbellard {
12929b3c35e0Sj_mayer     target_phys_addr_t addr;
12939b3c35e0Sj_mayer     target_ulong pd;
1294c2f07f81Spbrook     ram_addr_t ram_addr;
1295c2f07f81Spbrook     PhysPageDesc *p;
1296d720b93dSbellard 
1297c2f07f81Spbrook     addr = cpu_get_phys_page_debug(env, pc);
1298c2f07f81Spbrook     p = phys_page_find(addr >> TARGET_PAGE_BITS);
1299c2f07f81Spbrook     if (!p) {
1300c2f07f81Spbrook         pd = IO_MEM_UNASSIGNED;
1301c2f07f81Spbrook     } else {
1302c2f07f81Spbrook         pd = p->phys_offset;
1303c2f07f81Spbrook     }
1304c2f07f81Spbrook     ram_addr = (pd & TARGET_PAGE_MASK) | (pc & ~TARGET_PAGE_MASK);
1305706cd4b5Spbrook     tb_invalidate_phys_page_range(ram_addr, ram_addr + 1, 0);
1306d720b93dSbellard }
1307c27004ecSbellard #endif
1308d720b93dSbellard 
13096658ffb8Spbrook /* Add a watchpoint.  */
1310a1d1bb31Saliguori int cpu_watchpoint_insert(CPUState *env, target_ulong addr, target_ulong len,
1311a1d1bb31Saliguori                           int flags, CPUWatchpoint **watchpoint)
13126658ffb8Spbrook {
1313b4051334Saliguori     target_ulong len_mask = ~(len - 1);
1314c0ce998eSaliguori     CPUWatchpoint *wp;
13156658ffb8Spbrook 
1316b4051334Saliguori     /* sanity checks: allow power-of-2 lengths, deny unaligned watchpoints */
1317b4051334Saliguori     if ((len != 1 && len != 2 && len != 4 && len != 8) || (addr & ~len_mask)) {
1318b4051334Saliguori         fprintf(stderr, "qemu: tried to set invalid watchpoint at "
1319b4051334Saliguori                 TARGET_FMT_lx ", len=" TARGET_FMT_lu "\n", addr, len);
1320b4051334Saliguori         return -EINVAL;
1321b4051334Saliguori     }
1322a1d1bb31Saliguori     wp = qemu_malloc(sizeof(*wp));
13236658ffb8Spbrook 
1324a1d1bb31Saliguori     wp->vaddr = addr;
1325b4051334Saliguori     wp->len_mask = len_mask;
1326a1d1bb31Saliguori     wp->flags = flags;
1327a1d1bb31Saliguori 
13282dc9f411Saliguori     /* keep all GDB-injected watchpoints in front */
1329c0ce998eSaliguori     if (flags & BP_GDB)
1330c0ce998eSaliguori         TAILQ_INSERT_HEAD(&env->watchpoints, wp, entry);
1331c0ce998eSaliguori     else
1332c0ce998eSaliguori         TAILQ_INSERT_TAIL(&env->watchpoints, wp, entry);
1333a1d1bb31Saliguori 
13346658ffb8Spbrook     tlb_flush_page(env, addr);
1335a1d1bb31Saliguori 
1336a1d1bb31Saliguori     if (watchpoint)
1337a1d1bb31Saliguori         *watchpoint = wp;
1338a1d1bb31Saliguori     return 0;
13396658ffb8Spbrook }
13406658ffb8Spbrook 
1341a1d1bb31Saliguori /* Remove a specific watchpoint.  */
1342a1d1bb31Saliguori int cpu_watchpoint_remove(CPUState *env, target_ulong addr, target_ulong len,
1343a1d1bb31Saliguori                           int flags)
13446658ffb8Spbrook {
1345b4051334Saliguori     target_ulong len_mask = ~(len - 1);
1346a1d1bb31Saliguori     CPUWatchpoint *wp;
13476658ffb8Spbrook 
1348c0ce998eSaliguori     TAILQ_FOREACH(wp, &env->watchpoints, entry) {
1349b4051334Saliguori         if (addr == wp->vaddr && len_mask == wp->len_mask
13506e140f28Saliguori                 && flags == (wp->flags & ~BP_WATCHPOINT_HIT)) {
1351a1d1bb31Saliguori             cpu_watchpoint_remove_by_ref(env, wp);
13526658ffb8Spbrook             return 0;
13536658ffb8Spbrook         }
13546658ffb8Spbrook     }
1355a1d1bb31Saliguori     return -ENOENT;
13566658ffb8Spbrook }
13576658ffb8Spbrook 
/* Remove a specific watchpoint by reference.  Unlinks the entry,
   drops the cached TLB entry for its page, then frees it.  */
void cpu_watchpoint_remove_by_ref(CPUState *env, CPUWatchpoint *watchpoint)
{
    TAILQ_REMOVE(&env->watchpoints, watchpoint, entry);

    /* Must flush before freeing: the vaddr is still read here.  */
    tlb_flush_page(env, watchpoint->vaddr);

    qemu_free(watchpoint);
}
13677d03f82fSedgar_igl 
1368a1d1bb31Saliguori /* Remove all matching watchpoints.  */
1369a1d1bb31Saliguori void cpu_watchpoint_remove_all(CPUState *env, int mask)
1370a1d1bb31Saliguori {
1371c0ce998eSaliguori     CPUWatchpoint *wp, *next;
1372a1d1bb31Saliguori 
1373c0ce998eSaliguori     TAILQ_FOREACH_SAFE(wp, &env->watchpoints, entry, next) {
1374a1d1bb31Saliguori         if (wp->flags & mask)
1375a1d1bb31Saliguori             cpu_watchpoint_remove_by_ref(env, wp);
1376a1d1bb31Saliguori     }
1377c0ce998eSaliguori }
1378a1d1bb31Saliguori 
1379a1d1bb31Saliguori /* Add a breakpoint.  */
1380a1d1bb31Saliguori int cpu_breakpoint_insert(CPUState *env, target_ulong pc, int flags,
1381a1d1bb31Saliguori                           CPUBreakpoint **breakpoint)
13824c3a88a2Sbellard {
13831fddef4bSbellard #if defined(TARGET_HAS_ICE)
1384c0ce998eSaliguori     CPUBreakpoint *bp;
13854c3a88a2Sbellard 
1386a1d1bb31Saliguori     bp = qemu_malloc(sizeof(*bp));
13874c3a88a2Sbellard 
1388a1d1bb31Saliguori     bp->pc = pc;
1389a1d1bb31Saliguori     bp->flags = flags;
1390a1d1bb31Saliguori 
13912dc9f411Saliguori     /* keep all GDB-injected breakpoints in front */
1392c0ce998eSaliguori     if (flags & BP_GDB)
1393c0ce998eSaliguori         TAILQ_INSERT_HEAD(&env->breakpoints, bp, entry);
1394c0ce998eSaliguori     else
1395c0ce998eSaliguori         TAILQ_INSERT_TAIL(&env->breakpoints, bp, entry);
1396d720b93dSbellard 
1397d720b93dSbellard     breakpoint_invalidate(env, pc);
1398a1d1bb31Saliguori 
1399a1d1bb31Saliguori     if (breakpoint)
1400a1d1bb31Saliguori         *breakpoint = bp;
14014c3a88a2Sbellard     return 0;
14024c3a88a2Sbellard #else
1403a1d1bb31Saliguori     return -ENOSYS;
14044c3a88a2Sbellard #endif
14054c3a88a2Sbellard }
14064c3a88a2Sbellard 
1407a1d1bb31Saliguori /* Remove a specific breakpoint.  */
1408a1d1bb31Saliguori int cpu_breakpoint_remove(CPUState *env, target_ulong pc, int flags)
1409a1d1bb31Saliguori {
14107d03f82fSedgar_igl #if defined(TARGET_HAS_ICE)
1411a1d1bb31Saliguori     CPUBreakpoint *bp;
1412a1d1bb31Saliguori 
1413c0ce998eSaliguori     TAILQ_FOREACH(bp, &env->breakpoints, entry) {
1414a1d1bb31Saliguori         if (bp->pc == pc && bp->flags == flags) {
1415a1d1bb31Saliguori             cpu_breakpoint_remove_by_ref(env, bp);
1416a1d1bb31Saliguori             return 0;
14177d03f82fSedgar_igl         }
1418a1d1bb31Saliguori     }
1419a1d1bb31Saliguori     return -ENOENT;
1420a1d1bb31Saliguori #else
1421a1d1bb31Saliguori     return -ENOSYS;
14227d03f82fSedgar_igl #endif
14237d03f82fSedgar_igl }
14247d03f82fSedgar_igl 
/* Remove a specific breakpoint by reference.  Unlinks the entry,
   invalidates translated code at its pc, then frees it.  */
void cpu_breakpoint_remove_by_ref(CPUState *env, CPUBreakpoint *breakpoint)
{
#if defined(TARGET_HAS_ICE)
    TAILQ_REMOVE(&env->breakpoints, breakpoint, entry);

    /* Must invalidate before freeing: the pc is still read here.  */
    breakpoint_invalidate(env, breakpoint->pc);

    qemu_free(breakpoint);
#endif
}
1436a1d1bb31Saliguori 
1437a1d1bb31Saliguori /* Remove all matching breakpoints. */
1438a1d1bb31Saliguori void cpu_breakpoint_remove_all(CPUState *env, int mask)
1439a1d1bb31Saliguori {
1440a1d1bb31Saliguori #if defined(TARGET_HAS_ICE)
1441c0ce998eSaliguori     CPUBreakpoint *bp, *next;
1442a1d1bb31Saliguori 
1443c0ce998eSaliguori     TAILQ_FOREACH_SAFE(bp, &env->breakpoints, entry, next) {
1444a1d1bb31Saliguori         if (bp->flags & mask)
1445a1d1bb31Saliguori             cpu_breakpoint_remove_by_ref(env, bp);
1446c0ce998eSaliguori     }
14474c3a88a2Sbellard #endif
14484c3a88a2Sbellard }
14494c3a88a2Sbellard 
/* enable or disable single step mode. EXCP_DEBUG is returned by the
   CPU loop after each instruction */
void cpu_single_step(CPUState *env, int enabled)
{
#if defined(TARGET_HAS_ICE)
    if (env->singlestep_enabled != enabled) {
        env->singlestep_enabled = enabled;
        /* must flush all the translated code to avoid inconsistencies */
        /* XXX: only flush what is necessary */
        tb_flush(env);
    }
#endif
}
1463c33a346eSbellard 
/* enable or disable low levels log */
void cpu_set_log(int log_flags)
{
    loglevel = log_flags;
    /* Lazily open the log file the first time logging is enabled.  */
    if (loglevel && !logfile) {
        logfile = fopen(logfilename, log_append ? "a" : "w");
        if (!logfile) {
            perror(logfilename);
            _exit(1);
        }
#if !defined(CONFIG_SOFTMMU)
        /* must avoid mmap() usage of glibc by setting a buffer "by hand" */
        {
            static char logfile_buf[4096];
            setvbuf(logfile, logfile_buf, _IOLBF, sizeof(logfile_buf));
        }
#else
        setvbuf(logfile, NULL, _IOLBF, 0);
#endif
        /* Any subsequent open of the log file appends.  */
        log_append = 1;
    }
    /* Logging disabled: close the file.  */
    if (!loglevel && logfile) {
        fclose(logfile);
        logfile = NULL;
    }
}
149034865134Sbellard 
/* Change the log file name; closes any open log file and re-applies
   the current log level, which reopens under the new name if logging
   is active.  */
void cpu_set_log_filename(const char *filename)
{
    /* NOTE(review): the previous logfilename is not freed — it may
       point to a static default, so an unconditional free would be
       unsafe; the small leak on repeated calls is accepted.  */
    logfilename = strdup(filename);
    if (logfile) {
        fclose(logfile);
        logfile = NULL;
    }
    cpu_set_log(loglevel);
}
1500c33a346eSbellard 
/* mask must never be zero, except for A20 change call */
void cpu_interrupt(CPUState *env, int mask)
{
#if !defined(USE_NPTL)
    TranslationBlock *tb;
    static spinlock_t interrupt_lock = SPIN_LOCK_UNLOCKED;
#endif
    int old_mask;

    /* CPU_INTERRUPT_EXIT becomes an exit request rather than a bit in
       interrupt_request.  */
    if (mask & CPU_INTERRUPT_EXIT) {
        env->exit_request = 1;
        mask &= ~CPU_INTERRUPT_EXIT;
    }

    old_mask = env->interrupt_request;
    env->interrupt_request |= mask;
#if defined(USE_NPTL)
    /* FIXME: TB unchaining isn't SMP safe.  For now just ignore the
       problem and hope the cpu will stop of its own accord.  For userspace
       emulation this often isn't actually as bad as it sounds.  Often
       signals are used primarily to interrupt blocking syscalls.  */
#else
    if (use_icount) {
        /* Force the instruction-count decrementer to expire so the
           pending interrupt is noticed promptly.  */
        env->icount_decr.u16.high = 0xffff;
#ifndef CONFIG_USER_ONLY
        /* Newly raised interrupts outside of an I/O instruction would
           desynchronize the icount state.  */
        if (!can_do_io(env)
            && (mask & ~old_mask) != 0) {
            cpu_abort(env, "Raised interrupt while not in I/O function");
        }
#endif
    } else {
        tb = env->current_tb;
        /* if the cpu is currently executing code, we must unlink it and
           all the potentially executing TB */
        if (tb && !testandset(&interrupt_lock)) {
            env->current_tb = NULL;
            tb_reset_jump_recursive(tb);
            resetlock(&interrupt_lock);
        }
    }
#endif
}
1543ea041c0eSbellard 
/* Clear the given bits from the CPU's pending interrupt mask.  */
void cpu_reset_interrupt(CPUState *env, int mask)
{
    env->interrupt_request &= ~mask;
}
1548b54ad049Sbellard 
/* Table mapping each log mask bit to its command-line name and help
   text; terminated by an all-zero entry.  Consumed by
   cpu_str_to_log_mask() below.  */
const CPULogItem cpu_log_items[] = {
    { CPU_LOG_TB_OUT_ASM, "out_asm",
      "show generated host assembly code for each compiled TB" },
    { CPU_LOG_TB_IN_ASM, "in_asm",
      "show target assembly code for each compiled TB" },
    { CPU_LOG_TB_OP, "op",
      "show micro ops for each compiled TB" },
    { CPU_LOG_TB_OP_OPT, "op_opt",
      "show micro ops "
#ifdef TARGET_I386
      "before eflags optimization and "
#endif
      "after liveness analysis" },
    { CPU_LOG_INT, "int",
      "show interrupts/exceptions in short format" },
    { CPU_LOG_EXEC, "exec",
      "show trace before each executed TB (lots of logs)" },
    { CPU_LOG_TB_CPU, "cpu",
      "show CPU state before block translation" },
#ifdef TARGET_I386
    { CPU_LOG_PCALL, "pcall",
      "show protected mode far calls/returns/exceptions" },
    { CPU_LOG_RESET, "cpu_reset",
      "show CPU state before CPU resets" },
#endif
#ifdef DEBUG_IOPORT
    { CPU_LOG_IOPORT, "ioport",
      "show all i/o ports accesses" },
#endif
    { 0, NULL, NULL },
};
1580f193c797Sbellard 
/* Return nonzero iff the n-character prefix s1[0..n-1] equals the
   whole of string s2 (i.e. s2 has length n and the bytes match).  */
static int cmp1(const char *s1, int n, const char *s2)
{
    return strlen(s2) == (size_t)n && memcmp(s1, s2, n) == 0;
}
1587f193c797Sbellard 
1588f193c797Sbellard /* takes a comma separated list of log masks. Return 0 if error. */
1589f193c797Sbellard int cpu_str_to_log_mask(const char *str)
1590f193c797Sbellard {
1591c7cd6a37Sblueswir1     const CPULogItem *item;
1592f193c797Sbellard     int mask;
1593f193c797Sbellard     const char *p, *p1;
1594f193c797Sbellard 
1595f193c797Sbellard     p = str;
1596f193c797Sbellard     mask = 0;
1597f193c797Sbellard     for(;;) {
1598f193c797Sbellard         p1 = strchr(p, ',');
1599f193c797Sbellard         if (!p1)
1600f193c797Sbellard             p1 = p + strlen(p);
16018e3a9fd2Sbellard 	if(cmp1(p,p1-p,"all")) {
16028e3a9fd2Sbellard 		for(item = cpu_log_items; item->mask != 0; item++) {
16038e3a9fd2Sbellard 			mask |= item->mask;
16048e3a9fd2Sbellard 		}
16058e3a9fd2Sbellard 	} else {
1606f193c797Sbellard         for(item = cpu_log_items; item->mask != 0; item++) {
1607f193c797Sbellard             if (cmp1(p, p1 - p, item->name))
1608f193c797Sbellard                 goto found;
1609f193c797Sbellard         }
1610f193c797Sbellard         return 0;
16118e3a9fd2Sbellard 	}
1612f193c797Sbellard     found:
1613f193c797Sbellard         mask |= item->mask;
1614f193c797Sbellard         if (*p1 != ',')
1615f193c797Sbellard             break;
1616f193c797Sbellard         p = p1 + 1;
1617f193c797Sbellard     }
1618f193c797Sbellard     return mask;
1619f193c797Sbellard }
1620ea041c0eSbellard 
/* Print a fatal error message and the CPU state to stderr (and to the
   log file when logging is enabled), then abort().  Does not return.  */
void cpu_abort(CPUState *env, const char *fmt, ...)
{
    va_list ap;
    va_list ap2;

    va_start(ap, fmt);
    /* 'ap' is consumed by the vfprintf below; keep a copy for the
       optional qemu_log pass.  */
    va_copy(ap2, ap);
    fprintf(stderr, "qemu: fatal: ");
    vfprintf(stderr, fmt, ap);
    fprintf(stderr, "\n");
#ifdef TARGET_I386
    cpu_dump_state(env, stderr, fprintf, X86_DUMP_FPU | X86_DUMP_CCOP);
#else
    cpu_dump_state(env, stderr, fprintf, 0);
#endif
    /* Mirror the message into the log, then flush and close so the
       output survives the abort().  */
    if (qemu_log_enabled()) {
        qemu_log("qemu: fatal: ");
        qemu_log_vprintf(fmt, ap2);
        qemu_log("\n");
#ifdef TARGET_I386
        log_cpu_state(env, X86_DUMP_FPU | X86_DUMP_CCOP);
#else
        log_cpu_state(env, 0);
#endif
        qemu_log_flush();
        qemu_log_close();
    }
    va_end(ap2);
    va_end(ap);
    abort();
}
16527501267eSbellard 
1653c5be9f08Sths CPUState *cpu_copy(CPUState *env)
1654c5be9f08Sths {
165501ba9816Sths     CPUState *new_env = cpu_init(env->cpu_model_str);
1656c5be9f08Sths     CPUState *next_cpu = new_env->next_cpu;
1657c5be9f08Sths     int cpu_index = new_env->cpu_index;
16585a38f081Saliguori #if defined(TARGET_HAS_ICE)
16595a38f081Saliguori     CPUBreakpoint *bp;
16605a38f081Saliguori     CPUWatchpoint *wp;
16615a38f081Saliguori #endif
16625a38f081Saliguori 
1663c5be9f08Sths     memcpy(new_env, env, sizeof(CPUState));
16645a38f081Saliguori 
16655a38f081Saliguori     /* Preserve chaining and index. */
1666c5be9f08Sths     new_env->next_cpu = next_cpu;
1667c5be9f08Sths     new_env->cpu_index = cpu_index;
16685a38f081Saliguori 
16695a38f081Saliguori     /* Clone all break/watchpoints.
16705a38f081Saliguori        Note: Once we support ptrace with hw-debug register access, make sure
16715a38f081Saliguori        BP_CPU break/watchpoints are handled correctly on clone. */
16725a38f081Saliguori     TAILQ_INIT(&env->breakpoints);
16735a38f081Saliguori     TAILQ_INIT(&env->watchpoints);
16745a38f081Saliguori #if defined(TARGET_HAS_ICE)
16755a38f081Saliguori     TAILQ_FOREACH(bp, &env->breakpoints, entry) {
16765a38f081Saliguori         cpu_breakpoint_insert(new_env, bp->pc, bp->flags, NULL);
16775a38f081Saliguori     }
16785a38f081Saliguori     TAILQ_FOREACH(wp, &env->watchpoints, entry) {
16795a38f081Saliguori         cpu_watchpoint_insert(new_env, wp->vaddr, (~wp->len_mask) + 1,
16805a38f081Saliguori                               wp->flags, NULL);
16815a38f081Saliguori     }
16825a38f081Saliguori #endif
16835a38f081Saliguori 
1684c5be9f08Sths     return new_env;
1685c5be9f08Sths }
1686c5be9f08Sths 
16870124311eSbellard #if !defined(CONFIG_USER_ONLY)
16880124311eSbellard 
/* Clear tb_jmp_cache slots that could point at TBs overlapping the
   flushed page 'addr'.  */
static inline void tlb_flush_jmp_cache(CPUState *env, target_ulong addr)
{
    unsigned int i;

    /* Discard jump cache entries for any tb which might potentially
       overlap the flushed page.  The preceding page is cleared too,
       since a TB starting there may extend into this page.  */
    i = tb_jmp_cache_hash_page(addr - TARGET_PAGE_SIZE);
    memset (&env->tb_jmp_cache[i], 0,
	    TB_JMP_PAGE_SIZE * sizeof(TranslationBlock *));

    i = tb_jmp_cache_hash_page(addr);
    memset (&env->tb_jmp_cache[i], 0,
	    TB_JMP_PAGE_SIZE * sizeof(TranslationBlock *));
}
17035c751e99Sedgar_igl 
/* NOTE: if flush_global is true, also flush global entries (not
   implemented yet) */
void tlb_flush(CPUState *env, int flush_global)
{
    int i;

#if defined(DEBUG_TLB)
    printf("tlb_flush:\n");
#endif
    /* must reset current TB so that interrupts cannot modify the
       links while we are modifying them */
    env->current_tb = NULL;

    /* Invalidate every entry in every MMU mode: -1 never matches a
       page-aligned address comparison.  */
    for(i = 0; i < CPU_TLB_SIZE; i++) {
        env->tlb_table[0][i].addr_read = -1;
        env->tlb_table[0][i].addr_write = -1;
        env->tlb_table[0][i].addr_code = -1;
        env->tlb_table[1][i].addr_read = -1;
        env->tlb_table[1][i].addr_write = -1;
        env->tlb_table[1][i].addr_code = -1;
#if (NB_MMU_MODES >= 3)
        env->tlb_table[2][i].addr_read = -1;
        env->tlb_table[2][i].addr_write = -1;
        env->tlb_table[2][i].addr_code = -1;
#if (NB_MMU_MODES == 4)
        env->tlb_table[3][i].addr_read = -1;
        env->tlb_table[3][i].addr_write = -1;
        env->tlb_table[3][i].addr_code = -1;
#endif
#endif
    }

    /* The cached TB pointers are stale once the TLB is gone.  */
    memset (env->tb_jmp_cache, 0, TB_JMP_CACHE_SIZE * sizeof (void *));

#ifdef USE_KQEMU
    if (env->kqemu_enabled) {
        kqemu_flush(env, flush_global);
    }
#endif
    tlb_flush_count++;
}
174533417e70Sbellard 
1746274da6b2Sbellard static inline void tlb_flush_entry(CPUTLBEntry *tlb_entry, target_ulong addr)
174761382a50Sbellard {
174884b7b8e7Sbellard     if (addr == (tlb_entry->addr_read &
174984b7b8e7Sbellard                  (TARGET_PAGE_MASK | TLB_INVALID_MASK)) ||
175084b7b8e7Sbellard         addr == (tlb_entry->addr_write &
175184b7b8e7Sbellard                  (TARGET_PAGE_MASK | TLB_INVALID_MASK)) ||
175284b7b8e7Sbellard         addr == (tlb_entry->addr_code &
175384b7b8e7Sbellard                  (TARGET_PAGE_MASK | TLB_INVALID_MASK))) {
175484b7b8e7Sbellard         tlb_entry->addr_read = -1;
175584b7b8e7Sbellard         tlb_entry->addr_write = -1;
175684b7b8e7Sbellard         tlb_entry->addr_code = -1;
175784b7b8e7Sbellard     }
175861382a50Sbellard }
175961382a50Sbellard 
/* Flush the TLB entries for a single virtual page in every MMU mode.  */
void tlb_flush_page(CPUState *env, target_ulong addr)
{
    int i;

#if defined(DEBUG_TLB)
    printf("tlb_flush_page: " TARGET_FMT_lx "\n", addr);
#endif
    /* must reset current TB so that interrupts cannot modify the
       links while we are modifying them */
    env->current_tb = NULL;

    addr &= TARGET_PAGE_MASK;
    /* The entry index depends only on the virtual page number, so the
       same slot is checked in every mode's table.  */
    i = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
    tlb_flush_entry(&env->tlb_table[0][i], addr);
    tlb_flush_entry(&env->tlb_table[1][i], addr);
#if (NB_MMU_MODES >= 3)
    tlb_flush_entry(&env->tlb_table[2][i], addr);
#if (NB_MMU_MODES == 4)
    tlb_flush_entry(&env->tlb_table[3][i], addr);
#endif
#endif

    /* Drop cached TB pointers that may reference code on this page.  */
    tlb_flush_jmp_cache(env, addr);

#ifdef USE_KQEMU
    if (env->kqemu_enabled) {
        kqemu_flush_page(env, addr);
    }
#endif
}
17909fa3e853Sbellard 
/* update the TLBs so that writes to code in the virtual page 'addr'
   can be detected */
static void tlb_protect_code(ram_addr_t ram_addr)
{
    /* Clearing CODE_DIRTY_FLAG makes subsequent writes to this page
       take the dirty-tracking path where self-modifying code is
       detected.  */
    cpu_physical_memory_reset_dirty(ram_addr,
                                    ram_addr + TARGET_PAGE_SIZE,
                                    CODE_DIRTY_FLAG);
}
17999fa3e853Sbellard 
/* update the TLB so that writes in physical page 'phys_addr' are no longer
   tested for self modifying code */
static void tlb_unprotect_code_phys(CPUState *env, ram_addr_t ram_addr,
                                    target_ulong vaddr)
{
    /* NOTE: 'env' and 'vaddr' are unused here; only the per-page code
       dirty bit is set.  */
    phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] |= CODE_DIRTY_FLAG;
}
18079fa3e853Sbellard 
/* If this TLB entry maps writable RAM inside [start, start+length),
   mark it TLB_NOTDIRTY so the next write takes the slow path and
   sets the dirty bits again.  */
static inline void tlb_reset_dirty_range(CPUTLBEntry *tlb_entry,
                                         unsigned long start, unsigned long length)
{
    unsigned long addr;
    /* Only plain RAM entries participate in dirty tracking.  */
    if ((tlb_entry->addr_write & ~TARGET_PAGE_MASK) == IO_MEM_RAM) {
        addr = (tlb_entry->addr_write & TARGET_PAGE_MASK) + tlb_entry->addend;
        if ((addr - start) < length) {
            tlb_entry->addr_write = (tlb_entry->addr_write & TARGET_PAGE_MASK) | TLB_NOTDIRTY;
        }
    }
}
18191ccde1cbSbellard 
/* Clear the given dirty flags for every page in [start, end) and make
   all CPUs' TLB entries in that range trap on the next write so the
   bits get set again.  */
void cpu_physical_memory_reset_dirty(ram_addr_t start, ram_addr_t end,
                                     int dirty_flags)
{
    CPUState *env;
    unsigned long length, start1;
    int i, mask, len;
    uint8_t *p;

    /* Work on whole pages.  */
    start &= TARGET_PAGE_MASK;
    end = TARGET_PAGE_ALIGN(end);

    length = end - start;
    if (length == 0)
        return;
    len = length >> TARGET_PAGE_BITS;
#ifdef USE_KQEMU
    /* XXX: should not depend on cpu context */
    env = first_cpu;
    if (env->kqemu_enabled) {
        ram_addr_t addr;
        addr = start;
        for(i = 0; i < len; i++) {
            kqemu_set_notdirty(env, addr);
            addr += TARGET_PAGE_SIZE;
        }
    }
#endif
    /* Clear the requested flag bits in the per-page dirty bitmap.  */
    mask = ~dirty_flags;
    p = phys_ram_dirty + (start >> TARGET_PAGE_BITS);
    for(i = 0; i < len; i++)
        p[i] &= mask;

    /* we modify the TLB cache so that the dirty bit will be set again
       when accessing the range */
    start1 = start + (unsigned long)phys_ram_base;
    for(env = first_cpu; env != NULL; env = env->next_cpu) {
        for(i = 0; i < CPU_TLB_SIZE; i++)
            tlb_reset_dirty_range(&env->tlb_table[0][i], start1, length);
        for(i = 0; i < CPU_TLB_SIZE; i++)
            tlb_reset_dirty_range(&env->tlb_table[1][i], start1, length);
#if (NB_MMU_MODES >= 3)
        for(i = 0; i < CPU_TLB_SIZE; i++)
            tlb_reset_dirty_range(&env->tlb_table[2][i], start1, length);
#if (NB_MMU_MODES == 4)
        for(i = 0; i < CPU_TLB_SIZE; i++)
            tlb_reset_dirty_range(&env->tlb_table[3][i], start1, length);
#endif
#endif
    }
}
18701ccde1cbSbellard 
/* Enable/disable migration dirty-page tracking.  Always succeeds.  */
int cpu_physical_memory_set_dirty_tracking(int enable)
{
    in_migration = enable;
    return 0;
}
187674576198Saliguori 
/* Query whether migration dirty-page tracking is enabled.  */
int cpu_physical_memory_get_dirty_tracking(void)
{
    return in_migration;
}
188174576198Saliguori 
/* Pull the dirty bitmap for [start_addr, end_addr] from the
   accelerator; a no-op unless KVM is enabled.  */
void cpu_physical_sync_dirty_bitmap(target_phys_addr_t start_addr, target_phys_addr_t end_addr)
{
    if (kvm_enabled())
        kvm_physical_sync_dirty_bitmap(start_addr, end_addr);
}
18872bec46dcSaliguori 
/* Re-derive the TLB_NOTDIRTY flag of one entry from the current
   per-page dirty bitmap state.  */
static inline void tlb_update_dirty(CPUTLBEntry *tlb_entry)
{
    ram_addr_t ram_addr;

    /* Only plain RAM entries participate in dirty tracking.  */
    if ((tlb_entry->addr_write & ~TARGET_PAGE_MASK) == IO_MEM_RAM) {
        ram_addr = (tlb_entry->addr_write & TARGET_PAGE_MASK) +
            tlb_entry->addend - (unsigned long)phys_ram_base;
        if (!cpu_physical_memory_is_dirty(ram_addr)) {
            tlb_entry->addr_write |= TLB_NOTDIRTY;
        }
    }
}
19003a7d929eSbellard 
/* update the TLB according to the current state of the dirty bits */
void cpu_tlb_update_dirty(CPUState *env)
{
    int i;
    /* Walk every entry of every MMU mode's table.  */
    for(i = 0; i < CPU_TLB_SIZE; i++)
        tlb_update_dirty(&env->tlb_table[0][i]);
    for(i = 0; i < CPU_TLB_SIZE; i++)
        tlb_update_dirty(&env->tlb_table[1][i]);
#if (NB_MMU_MODES >= 3)
    for(i = 0; i < CPU_TLB_SIZE; i++)
        tlb_update_dirty(&env->tlb_table[2][i]);
#if (NB_MMU_MODES == 4)
    for(i = 0; i < CPU_TLB_SIZE; i++)
        tlb_update_dirty(&env->tlb_table[3][i]);
#endif
#endif
}
19183a7d929eSbellard 
/* If this entry maps 'vaddr' with TLB_NOTDIRTY set, clear the flag so
   writes go through the fast path again.  */
static inline void tlb_set_dirty1(CPUTLBEntry *tlb_entry, target_ulong vaddr)
{
    if (tlb_entry->addr_write == (vaddr | TLB_NOTDIRTY))
        tlb_entry->addr_write = vaddr;
}
19241ccde1cbSbellard 
/* update the TLB corresponding to virtual page vaddr
   so that it is no longer dirty */
static inline void tlb_set_dirty(CPUState *env, target_ulong vaddr)
{
    int i;

    vaddr &= TARGET_PAGE_MASK;
    /* Same index in every MMU mode's table for a given page.  */
    i = (vaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
    tlb_set_dirty1(&env->tlb_table[0][i], vaddr);
    tlb_set_dirty1(&env->tlb_table[1][i], vaddr);
#if (NB_MMU_MODES >= 3)
    tlb_set_dirty1(&env->tlb_table[2][i], vaddr);
#if (NB_MMU_MODES == 4)
    tlb_set_dirty1(&env->tlb_table[3][i], vaddr);
#endif
#endif
}
19421ccde1cbSbellard 
194359817ccbSbellard /* add a new TLB entry. At most one entry for a given virtual address
194459817ccbSbellard    is permitted. Return 0 if OK or 2 if the page could not be mapped
194559817ccbSbellard    (can only happen in non SOFTMMU mode for I/O pages or pages
194659817ccbSbellard    conflicting with the host address space). */
194784b7b8e7Sbellard int tlb_set_page_exec(CPUState *env, target_ulong vaddr,
19482e12669aSbellard                       target_phys_addr_t paddr, int prot,
19496ebbf390Sj_mayer                       int mmu_idx, int is_softmmu)
19509fa3e853Sbellard {
195192e873b9Sbellard     PhysPageDesc *p;
19524f2ac237Sbellard     unsigned long pd;
19539fa3e853Sbellard     unsigned int index;
19544f2ac237Sbellard     target_ulong address;
19550f459d16Spbrook     target_ulong code_address;
1956108c49b8Sbellard     target_phys_addr_t addend;
19579fa3e853Sbellard     int ret;
195884b7b8e7Sbellard     CPUTLBEntry *te;
1959a1d1bb31Saliguori     CPUWatchpoint *wp;
19600f459d16Spbrook     target_phys_addr_t iotlb;
19619fa3e853Sbellard 
196292e873b9Sbellard     p = phys_page_find(paddr >> TARGET_PAGE_BITS);
19639fa3e853Sbellard     if (!p) {
19649fa3e853Sbellard         pd = IO_MEM_UNASSIGNED;
19659fa3e853Sbellard     } else {
19669fa3e853Sbellard         pd = p->phys_offset;
19679fa3e853Sbellard     }
19689fa3e853Sbellard #if defined(DEBUG_TLB)
19696ebbf390Sj_mayer     printf("tlb_set_page: vaddr=" TARGET_FMT_lx " paddr=0x%08x prot=%x idx=%d smmu=%d pd=0x%08lx\n",
19706ebbf390Sj_mayer            vaddr, (int)paddr, prot, mmu_idx, is_softmmu, pd);
19719fa3e853Sbellard #endif
19729fa3e853Sbellard 
19739fa3e853Sbellard     ret = 0;
19749fa3e853Sbellard     address = vaddr;
19750f459d16Spbrook     if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM && !(pd & IO_MEM_ROMD)) {
19760f459d16Spbrook         /* IO memory case (romd handled later) */
19770f459d16Spbrook         address |= TLB_MMIO;
19780f459d16Spbrook     }
19799fa3e853Sbellard     addend = (unsigned long)phys_ram_base + (pd & TARGET_PAGE_MASK);
19800f459d16Spbrook     if ((pd & ~TARGET_PAGE_MASK) <= IO_MEM_ROM) {
19810f459d16Spbrook         /* Normal RAM.  */
19820f459d16Spbrook         iotlb = pd & TARGET_PAGE_MASK;
19830f459d16Spbrook         if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_RAM)
19840f459d16Spbrook             iotlb |= IO_MEM_NOTDIRTY;
19850f459d16Spbrook         else
19860f459d16Spbrook             iotlb |= IO_MEM_ROM;
19870f459d16Spbrook     } else {
19880f459d16Spbrook         /* IO handlers are currently passed a phsical address.
19890f459d16Spbrook            It would be nice to pass an offset from the base address
19900f459d16Spbrook            of that region.  This would avoid having to special case RAM,
19910f459d16Spbrook            and avoid full address decoding in every device.
19920f459d16Spbrook            We can't use the high bits of pd for this because
19930f459d16Spbrook            IO_MEM_ROMD uses these as a ram address.  */
19948da3ff18Spbrook         iotlb = (pd & ~TARGET_PAGE_MASK);
19958da3ff18Spbrook         if (p) {
19968da3ff18Spbrook             iotlb += p->region_offset;
19978da3ff18Spbrook         } else {
19988da3ff18Spbrook             iotlb += paddr;
19998da3ff18Spbrook         }
20009fa3e853Sbellard     }
20019fa3e853Sbellard 
20020f459d16Spbrook     code_address = address;
20036658ffb8Spbrook     /* Make accesses to pages with watchpoints go via the
20046658ffb8Spbrook        watchpoint trap routines.  */
2005c0ce998eSaliguori     TAILQ_FOREACH(wp, &env->watchpoints, entry) {
2006a1d1bb31Saliguori         if (vaddr == (wp->vaddr & TARGET_PAGE_MASK)) {
20070f459d16Spbrook             iotlb = io_mem_watch + paddr;
20080f459d16Spbrook             /* TODO: The memory case can be optimized by not trapping
20090f459d16Spbrook                reads of pages with a write breakpoint.  */
20100f459d16Spbrook             address |= TLB_MMIO;
20116658ffb8Spbrook         }
20126658ffb8Spbrook     }
20136658ffb8Spbrook 
201490f18422Sbellard     index = (vaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
20150f459d16Spbrook     env->iotlb[mmu_idx][index] = iotlb - vaddr;
20166ebbf390Sj_mayer     te = &env->tlb_table[mmu_idx][index];
20170f459d16Spbrook     te->addend = addend - vaddr;
201867b915a5Sbellard     if (prot & PAGE_READ) {
201984b7b8e7Sbellard         te->addr_read = address;
20209fa3e853Sbellard     } else {
202184b7b8e7Sbellard         te->addr_read = -1;
202284b7b8e7Sbellard     }
20235c751e99Sedgar_igl 
202484b7b8e7Sbellard     if (prot & PAGE_EXEC) {
20250f459d16Spbrook         te->addr_code = code_address;
202684b7b8e7Sbellard     } else {
202784b7b8e7Sbellard         te->addr_code = -1;
20289fa3e853Sbellard     }
202967b915a5Sbellard     if (prot & PAGE_WRITE) {
2030856074ecSbellard         if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_ROM ||
2031856074ecSbellard             (pd & IO_MEM_ROMD)) {
20320f459d16Spbrook             /* Write access calls the I/O callback.  */
20330f459d16Spbrook             te->addr_write = address | TLB_MMIO;
20343a7d929eSbellard         } else if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_RAM &&
20351ccde1cbSbellard                    !cpu_physical_memory_is_dirty(pd)) {
20360f459d16Spbrook             te->addr_write = address | TLB_NOTDIRTY;
20379fa3e853Sbellard         } else {
203884b7b8e7Sbellard             te->addr_write = address;
20399fa3e853Sbellard         }
20409fa3e853Sbellard     } else {
204184b7b8e7Sbellard         te->addr_write = -1;
20429fa3e853Sbellard     }
20439fa3e853Sbellard     return ret;
20449fa3e853Sbellard }
20459fa3e853Sbellard 
20460124311eSbellard #else
20470124311eSbellard 
2048ee8b7021Sbellard void tlb_flush(CPUState *env, int flush_global)
20490124311eSbellard {
20500124311eSbellard }
20510124311eSbellard 
20522e12669aSbellard void tlb_flush_page(CPUState *env, target_ulong addr)
20530124311eSbellard {
20540124311eSbellard }
20550124311eSbellard 
205684b7b8e7Sbellard int tlb_set_page_exec(CPUState *env, target_ulong vaddr,
20572e12669aSbellard                       target_phys_addr_t paddr, int prot,
20586ebbf390Sj_mayer                       int mmu_idx, int is_softmmu)
205933417e70Sbellard {
20609fa3e853Sbellard     return 0;
206133417e70Sbellard }
206233417e70Sbellard 
20639fa3e853Sbellard /* dump memory mappings */
20649fa3e853Sbellard void page_dump(FILE *f)
206533417e70Sbellard {
20669fa3e853Sbellard     unsigned long start, end;
20679fa3e853Sbellard     int i, j, prot, prot1;
20689fa3e853Sbellard     PageDesc *p;
20699fa3e853Sbellard 
20709fa3e853Sbellard     fprintf(f, "%-8s %-8s %-8s %s\n",
20719fa3e853Sbellard             "start", "end", "size", "prot");
20729fa3e853Sbellard     start = -1;
20739fa3e853Sbellard     end = -1;
20749fa3e853Sbellard     prot = 0;
20759fa3e853Sbellard     for(i = 0; i <= L1_SIZE; i++) {
20769fa3e853Sbellard         if (i < L1_SIZE)
20779fa3e853Sbellard             p = l1_map[i];
20789fa3e853Sbellard         else
20799fa3e853Sbellard             p = NULL;
20809fa3e853Sbellard         for(j = 0;j < L2_SIZE; j++) {
208133417e70Sbellard             if (!p)
20829fa3e853Sbellard                 prot1 = 0;
20839fa3e853Sbellard             else
20849fa3e853Sbellard                 prot1 = p[j].flags;
20859fa3e853Sbellard             if (prot1 != prot) {
20869fa3e853Sbellard                 end = (i << (32 - L1_BITS)) | (j << TARGET_PAGE_BITS);
20879fa3e853Sbellard                 if (start != -1) {
20889fa3e853Sbellard                     fprintf(f, "%08lx-%08lx %08lx %c%c%c\n",
20899fa3e853Sbellard                             start, end, end - start,
20909fa3e853Sbellard                             prot & PAGE_READ ? 'r' : '-',
20919fa3e853Sbellard                             prot & PAGE_WRITE ? 'w' : '-',
20929fa3e853Sbellard                             prot & PAGE_EXEC ? 'x' : '-');
209333417e70Sbellard                 }
20949fa3e853Sbellard                 if (prot1 != 0)
20959fa3e853Sbellard                     start = end;
20969fa3e853Sbellard                 else
20979fa3e853Sbellard                     start = -1;
20989fa3e853Sbellard                 prot = prot1;
20999fa3e853Sbellard             }
21009fa3e853Sbellard             if (!p)
21019fa3e853Sbellard                 break;
21029fa3e853Sbellard         }
21039fa3e853Sbellard     }
21049fa3e853Sbellard }
21059fa3e853Sbellard 
210653a5960aSpbrook int page_get_flags(target_ulong address)
21079fa3e853Sbellard {
21089fa3e853Sbellard     PageDesc *p;
21099fa3e853Sbellard 
21109fa3e853Sbellard     p = page_find(address >> TARGET_PAGE_BITS);
21119fa3e853Sbellard     if (!p)
21129fa3e853Sbellard         return 0;
21139fa3e853Sbellard     return p->flags;
21149fa3e853Sbellard }
21159fa3e853Sbellard 
21169fa3e853Sbellard /* modify the flags of a page and invalidate the code if
21179fa3e853Sbellard    necessary. The flag PAGE_WRITE_ORG is positionned automatically
21189fa3e853Sbellard    depending on PAGE_WRITE */
211953a5960aSpbrook void page_set_flags(target_ulong start, target_ulong end, int flags)
21209fa3e853Sbellard {
21219fa3e853Sbellard     PageDesc *p;
212253a5960aSpbrook     target_ulong addr;
21239fa3e853Sbellard 
2124c8a706feSpbrook     /* mmap_lock should already be held.  */
21259fa3e853Sbellard     start = start & TARGET_PAGE_MASK;
21269fa3e853Sbellard     end = TARGET_PAGE_ALIGN(end);
21279fa3e853Sbellard     if (flags & PAGE_WRITE)
21289fa3e853Sbellard         flags |= PAGE_WRITE_ORG;
21299fa3e853Sbellard     for(addr = start; addr < end; addr += TARGET_PAGE_SIZE) {
21309fa3e853Sbellard         p = page_find_alloc(addr >> TARGET_PAGE_BITS);
213117e2377aSpbrook         /* We may be called for host regions that are outside guest
213217e2377aSpbrook            address space.  */
213317e2377aSpbrook         if (!p)
213417e2377aSpbrook             return;
21359fa3e853Sbellard         /* if the write protection is set, then we invalidate the code
21369fa3e853Sbellard            inside */
21379fa3e853Sbellard         if (!(p->flags & PAGE_WRITE) &&
21389fa3e853Sbellard             (flags & PAGE_WRITE) &&
21399fa3e853Sbellard             p->first_tb) {
2140d720b93dSbellard             tb_invalidate_phys_page(addr, 0, NULL);
21419fa3e853Sbellard         }
21429fa3e853Sbellard         p->flags = flags;
21439fa3e853Sbellard     }
21449fa3e853Sbellard }
21459fa3e853Sbellard 
21463d97b40bSths int page_check_range(target_ulong start, target_ulong len, int flags)
21473d97b40bSths {
21483d97b40bSths     PageDesc *p;
21493d97b40bSths     target_ulong end;
21503d97b40bSths     target_ulong addr;
21513d97b40bSths 
215255f280c9Sbalrog     if (start + len < start)
215355f280c9Sbalrog         /* we've wrapped around */
215455f280c9Sbalrog         return -1;
215555f280c9Sbalrog 
21563d97b40bSths     end = TARGET_PAGE_ALIGN(start+len); /* must do before we loose bits in the next step */
21573d97b40bSths     start = start & TARGET_PAGE_MASK;
21583d97b40bSths 
21593d97b40bSths     for(addr = start; addr < end; addr += TARGET_PAGE_SIZE) {
21603d97b40bSths         p = page_find(addr >> TARGET_PAGE_BITS);
21613d97b40bSths         if( !p )
21623d97b40bSths             return -1;
21633d97b40bSths         if( !(p->flags & PAGE_VALID) )
21643d97b40bSths             return -1;
21653d97b40bSths 
2166dae3270cSbellard         if ((flags & PAGE_READ) && !(p->flags & PAGE_READ))
21673d97b40bSths             return -1;
2168dae3270cSbellard         if (flags & PAGE_WRITE) {
2169dae3270cSbellard             if (!(p->flags & PAGE_WRITE_ORG))
21703d97b40bSths                 return -1;
2171dae3270cSbellard             /* unprotect the page if it was put read-only because it
2172dae3270cSbellard                contains translated code */
2173dae3270cSbellard             if (!(p->flags & PAGE_WRITE)) {
2174dae3270cSbellard                 if (!page_unprotect(addr, 0, NULL))
2175dae3270cSbellard                     return -1;
2176dae3270cSbellard             }
2177dae3270cSbellard             return 0;
2178dae3270cSbellard         }
21793d97b40bSths     }
21803d97b40bSths     return 0;
21813d97b40bSths }
21823d97b40bSths 
21839fa3e853Sbellard /* called from signal handler: invalidate the code and unprotect the
21849fa3e853Sbellard    page. Return TRUE if the fault was succesfully handled. */
218553a5960aSpbrook int page_unprotect(target_ulong address, unsigned long pc, void *puc)
21869fa3e853Sbellard {
21879fa3e853Sbellard     unsigned int page_index, prot, pindex;
21889fa3e853Sbellard     PageDesc *p, *p1;
218953a5960aSpbrook     target_ulong host_start, host_end, addr;
21909fa3e853Sbellard 
2191c8a706feSpbrook     /* Technically this isn't safe inside a signal handler.  However we
2192c8a706feSpbrook        know this only ever happens in a synchronous SEGV handler, so in
2193c8a706feSpbrook        practice it seems to be ok.  */
2194c8a706feSpbrook     mmap_lock();
2195c8a706feSpbrook 
219683fb7adfSbellard     host_start = address & qemu_host_page_mask;
21979fa3e853Sbellard     page_index = host_start >> TARGET_PAGE_BITS;
21989fa3e853Sbellard     p1 = page_find(page_index);
2199c8a706feSpbrook     if (!p1) {
2200c8a706feSpbrook         mmap_unlock();
22019fa3e853Sbellard         return 0;
2202c8a706feSpbrook     }
220383fb7adfSbellard     host_end = host_start + qemu_host_page_size;
22049fa3e853Sbellard     p = p1;
22059fa3e853Sbellard     prot = 0;
22069fa3e853Sbellard     for(addr = host_start;addr < host_end; addr += TARGET_PAGE_SIZE) {
22079fa3e853Sbellard         prot |= p->flags;
22089fa3e853Sbellard         p++;
22099fa3e853Sbellard     }
22109fa3e853Sbellard     /* if the page was really writable, then we change its
22119fa3e853Sbellard        protection back to writable */
22129fa3e853Sbellard     if (prot & PAGE_WRITE_ORG) {
22139fa3e853Sbellard         pindex = (address - host_start) >> TARGET_PAGE_BITS;
22149fa3e853Sbellard         if (!(p1[pindex].flags & PAGE_WRITE)) {
221553a5960aSpbrook             mprotect((void *)g2h(host_start), qemu_host_page_size,
22169fa3e853Sbellard                      (prot & PAGE_BITS) | PAGE_WRITE);
22179fa3e853Sbellard             p1[pindex].flags |= PAGE_WRITE;
22189fa3e853Sbellard             /* and since the content will be modified, we must invalidate
22199fa3e853Sbellard                the corresponding translated code. */
2220d720b93dSbellard             tb_invalidate_phys_page(address, pc, puc);
22219fa3e853Sbellard #ifdef DEBUG_TB_CHECK
22229fa3e853Sbellard             tb_invalidate_check(address);
22239fa3e853Sbellard #endif
2224c8a706feSpbrook             mmap_unlock();
22259fa3e853Sbellard             return 1;
22269fa3e853Sbellard         }
22279fa3e853Sbellard     }
2228c8a706feSpbrook     mmap_unlock();
22299fa3e853Sbellard     return 0;
22309fa3e853Sbellard }
22319fa3e853Sbellard 
22326a00d601Sbellard static inline void tlb_set_dirty(CPUState *env,
22336a00d601Sbellard                                  unsigned long addr, target_ulong vaddr)
22341ccde1cbSbellard {
22351ccde1cbSbellard }
22369fa3e853Sbellard #endif /* defined(CONFIG_USER_ONLY) */
223733417e70Sbellard 
2238e2eef170Spbrook #if !defined(CONFIG_USER_ONLY)
22398da3ff18Spbrook 
2240db7b5426Sblueswir1 static int subpage_register (subpage_t *mmio, uint32_t start, uint32_t end,
22418da3ff18Spbrook                              ram_addr_t memory, ram_addr_t region_offset);
224200f82b8aSaurel32 static void *subpage_init (target_phys_addr_t base, ram_addr_t *phys,
22438da3ff18Spbrook                            ram_addr_t orig_memory, ram_addr_t region_offset);
2244db7b5426Sblueswir1 #define CHECK_SUBPAGE(addr, start_addr, start_addr2, end_addr, end_addr2, \
2245db7b5426Sblueswir1                       need_subpage)                                     \
2246db7b5426Sblueswir1     do {                                                                \
2247db7b5426Sblueswir1         if (addr > start_addr)                                          \
2248db7b5426Sblueswir1             start_addr2 = 0;                                            \
2249db7b5426Sblueswir1         else {                                                          \
2250db7b5426Sblueswir1             start_addr2 = start_addr & ~TARGET_PAGE_MASK;               \
2251db7b5426Sblueswir1             if (start_addr2 > 0)                                        \
2252db7b5426Sblueswir1                 need_subpage = 1;                                       \
2253db7b5426Sblueswir1         }                                                               \
2254db7b5426Sblueswir1                                                                         \
225549e9fba2Sblueswir1         if ((start_addr + orig_size) - addr >= TARGET_PAGE_SIZE)        \
2256db7b5426Sblueswir1             end_addr2 = TARGET_PAGE_SIZE - 1;                           \
2257db7b5426Sblueswir1         else {                                                          \
2258db7b5426Sblueswir1             end_addr2 = (start_addr + orig_size - 1) & ~TARGET_PAGE_MASK; \
2259db7b5426Sblueswir1             if (end_addr2 < TARGET_PAGE_SIZE - 1)                       \
2260db7b5426Sblueswir1                 need_subpage = 1;                                       \
2261db7b5426Sblueswir1         }                                                               \
2262db7b5426Sblueswir1     } while (0)
2263db7b5426Sblueswir1 
226433417e70Sbellard /* register physical memory. 'size' must be a multiple of the target
226533417e70Sbellard    page size. If (phys_offset & ~TARGET_PAGE_MASK) != 0, then it is an
22668da3ff18Spbrook    io memory page.  The address used when calling the IO function is
22678da3ff18Spbrook    the offset from the start of the region, plus region_offset.  Both
22688da3ff18Spbrook    start_region and regon_offset are rounded down to a page boundary
22698da3ff18Spbrook    before calculating this offset.  This should not be a problem unless
22708da3ff18Spbrook    the low bits of start_addr and region_offset differ.  */
22718da3ff18Spbrook void cpu_register_physical_memory_offset(target_phys_addr_t start_addr,
227200f82b8aSaurel32                                          ram_addr_t size,
22738da3ff18Spbrook                                          ram_addr_t phys_offset,
22748da3ff18Spbrook                                          ram_addr_t region_offset)
227533417e70Sbellard {
2276108c49b8Sbellard     target_phys_addr_t addr, end_addr;
227792e873b9Sbellard     PhysPageDesc *p;
22789d42037bSbellard     CPUState *env;
227900f82b8aSaurel32     ram_addr_t orig_size = size;
2280db7b5426Sblueswir1     void *subpage;
228133417e70Sbellard 
2282da260249Sbellard #ifdef USE_KQEMU
2283da260249Sbellard     /* XXX: should not depend on cpu context */
2284da260249Sbellard     env = first_cpu;
2285da260249Sbellard     if (env->kqemu_enabled) {
2286da260249Sbellard         kqemu_set_phys_mem(start_addr, size, phys_offset);
2287da260249Sbellard     }
2288da260249Sbellard #endif
22897ba1e619Saliguori     if (kvm_enabled())
22907ba1e619Saliguori         kvm_set_phys_mem(start_addr, size, phys_offset);
22917ba1e619Saliguori 
229267c4d23cSpbrook     if (phys_offset == IO_MEM_UNASSIGNED) {
229367c4d23cSpbrook         region_offset = start_addr;
229467c4d23cSpbrook     }
22958da3ff18Spbrook     region_offset &= TARGET_PAGE_MASK;
22965fd386f6Sbellard     size = (size + TARGET_PAGE_SIZE - 1) & TARGET_PAGE_MASK;
229749e9fba2Sblueswir1     end_addr = start_addr + (target_phys_addr_t)size;
229849e9fba2Sblueswir1     for(addr = start_addr; addr != end_addr; addr += TARGET_PAGE_SIZE) {
2299db7b5426Sblueswir1         p = phys_page_find(addr >> TARGET_PAGE_BITS);
2300db7b5426Sblueswir1         if (p && p->phys_offset != IO_MEM_UNASSIGNED) {
230100f82b8aSaurel32             ram_addr_t orig_memory = p->phys_offset;
2302db7b5426Sblueswir1             target_phys_addr_t start_addr2, end_addr2;
2303db7b5426Sblueswir1             int need_subpage = 0;
2304db7b5426Sblueswir1 
2305db7b5426Sblueswir1             CHECK_SUBPAGE(addr, start_addr, start_addr2, end_addr, end_addr2,
2306db7b5426Sblueswir1                           need_subpage);
23074254fab8Sblueswir1             if (need_subpage || phys_offset & IO_MEM_SUBWIDTH) {
2308db7b5426Sblueswir1                 if (!(orig_memory & IO_MEM_SUBPAGE)) {
2309db7b5426Sblueswir1                     subpage = subpage_init((addr & TARGET_PAGE_MASK),
23108da3ff18Spbrook                                            &p->phys_offset, orig_memory,
23118da3ff18Spbrook                                            p->region_offset);
2312db7b5426Sblueswir1                 } else {
2313db7b5426Sblueswir1                     subpage = io_mem_opaque[(orig_memory & ~TARGET_PAGE_MASK)
2314db7b5426Sblueswir1                                             >> IO_MEM_SHIFT];
2315db7b5426Sblueswir1                 }
23168da3ff18Spbrook                 subpage_register(subpage, start_addr2, end_addr2, phys_offset,
23178da3ff18Spbrook                                  region_offset);
23188da3ff18Spbrook                 p->region_offset = 0;
2319db7b5426Sblueswir1             } else {
2320db7b5426Sblueswir1                 p->phys_offset = phys_offset;
2321db7b5426Sblueswir1                 if ((phys_offset & ~TARGET_PAGE_MASK) <= IO_MEM_ROM ||
2322db7b5426Sblueswir1                     (phys_offset & IO_MEM_ROMD))
2323db7b5426Sblueswir1                     phys_offset += TARGET_PAGE_SIZE;
2324db7b5426Sblueswir1             }
2325db7b5426Sblueswir1         } else {
2326108c49b8Sbellard             p = phys_page_find_alloc(addr >> TARGET_PAGE_BITS, 1);
23279fa3e853Sbellard             p->phys_offset = phys_offset;
23288da3ff18Spbrook             p->region_offset = region_offset;
23292a4188a3Sbellard             if ((phys_offset & ~TARGET_PAGE_MASK) <= IO_MEM_ROM ||
23308da3ff18Spbrook                 (phys_offset & IO_MEM_ROMD)) {
233133417e70Sbellard                 phys_offset += TARGET_PAGE_SIZE;
23328da3ff18Spbrook             } else {
2333db7b5426Sblueswir1                 target_phys_addr_t start_addr2, end_addr2;
2334db7b5426Sblueswir1                 int need_subpage = 0;
2335db7b5426Sblueswir1 
2336db7b5426Sblueswir1                 CHECK_SUBPAGE(addr, start_addr, start_addr2, end_addr,
2337db7b5426Sblueswir1                               end_addr2, need_subpage);
2338db7b5426Sblueswir1 
23394254fab8Sblueswir1                 if (need_subpage || phys_offset & IO_MEM_SUBWIDTH) {
2340db7b5426Sblueswir1                     subpage = subpage_init((addr & TARGET_PAGE_MASK),
23418da3ff18Spbrook                                            &p->phys_offset, IO_MEM_UNASSIGNED,
234267c4d23cSpbrook                                            addr & TARGET_PAGE_MASK);
2343db7b5426Sblueswir1                     subpage_register(subpage, start_addr2, end_addr2,
23448da3ff18Spbrook                                      phys_offset, region_offset);
23458da3ff18Spbrook                     p->region_offset = 0;
2346db7b5426Sblueswir1                 }
2347db7b5426Sblueswir1             }
2348db7b5426Sblueswir1         }
23498da3ff18Spbrook         region_offset += TARGET_PAGE_SIZE;
235033417e70Sbellard     }
23519d42037bSbellard 
23529d42037bSbellard     /* since each CPU stores ram addresses in its TLB cache, we must
23539d42037bSbellard        reset the modified entries */
23549d42037bSbellard     /* XXX: slow ! */
23559d42037bSbellard     for(env = first_cpu; env != NULL; env = env->next_cpu) {
23569d42037bSbellard         tlb_flush(env, 1);
23579d42037bSbellard     }
235833417e70Sbellard }
235933417e70Sbellard 
2360ba863458Sbellard /* XXX: temporary until new memory mapping API */
236100f82b8aSaurel32 ram_addr_t cpu_get_physical_page_desc(target_phys_addr_t addr)
2362ba863458Sbellard {
2363ba863458Sbellard     PhysPageDesc *p;
2364ba863458Sbellard 
2365ba863458Sbellard     p = phys_page_find(addr >> TARGET_PAGE_BITS);
2366ba863458Sbellard     if (!p)
2367ba863458Sbellard         return IO_MEM_UNASSIGNED;
2368ba863458Sbellard     return p->phys_offset;
2369ba863458Sbellard }
2370ba863458Sbellard 
2371f65ed4c1Saliguori void qemu_register_coalesced_mmio(target_phys_addr_t addr, ram_addr_t size)
2372f65ed4c1Saliguori {
2373f65ed4c1Saliguori     if (kvm_enabled())
2374f65ed4c1Saliguori         kvm_coalesce_mmio_region(addr, size);
2375f65ed4c1Saliguori }
2376f65ed4c1Saliguori 
2377f65ed4c1Saliguori void qemu_unregister_coalesced_mmio(target_phys_addr_t addr, ram_addr_t size)
2378f65ed4c1Saliguori {
2379f65ed4c1Saliguori     if (kvm_enabled())
2380f65ed4c1Saliguori         kvm_uncoalesce_mmio_region(addr, size);
2381f65ed4c1Saliguori }
2382f65ed4c1Saliguori 
2383e9a1ab19Sbellard /* XXX: better than nothing */
238400f82b8aSaurel32 ram_addr_t qemu_ram_alloc(ram_addr_t size)
2385e9a1ab19Sbellard {
2386e9a1ab19Sbellard     ram_addr_t addr;
23877fb4fdcfSbalrog     if ((phys_ram_alloc_offset + size) > phys_ram_size) {
2388012a7045Sths         fprintf(stderr, "Not enough memory (requested_size = %" PRIu64 ", max memory = %" PRIu64 ")\n",
2389ed441467Sbellard                 (uint64_t)size, (uint64_t)phys_ram_size);
2390e9a1ab19Sbellard         abort();
2391e9a1ab19Sbellard     }
2392e9a1ab19Sbellard     addr = phys_ram_alloc_offset;
2393e9a1ab19Sbellard     phys_ram_alloc_offset = TARGET_PAGE_ALIGN(phys_ram_alloc_offset + size);
2394e9a1ab19Sbellard     return addr;
2395e9a1ab19Sbellard }
2396e9a1ab19Sbellard 
2397e9a1ab19Sbellard void qemu_ram_free(ram_addr_t addr)
2398e9a1ab19Sbellard {
2399e9a1ab19Sbellard }
2400e9a1ab19Sbellard 
2401a4193c8aSbellard static uint32_t unassigned_mem_readb(void *opaque, target_phys_addr_t addr)
240233417e70Sbellard {
240367d3b957Spbrook #ifdef DEBUG_UNASSIGNED
2404ab3d1727Sblueswir1     printf("Unassigned mem read " TARGET_FMT_plx "\n", addr);
240567d3b957Spbrook #endif
24060a6f8a6dSedgar_igl #if defined(TARGET_SPARC)
2407e18231a3Sblueswir1     do_unassigned_access(addr, 0, 0, 0, 1);
2408e18231a3Sblueswir1 #endif
2409e18231a3Sblueswir1     return 0;
2410e18231a3Sblueswir1 }
2411e18231a3Sblueswir1 
2412e18231a3Sblueswir1 static uint32_t unassigned_mem_readw(void *opaque, target_phys_addr_t addr)
2413e18231a3Sblueswir1 {
2414e18231a3Sblueswir1 #ifdef DEBUG_UNASSIGNED
2415e18231a3Sblueswir1     printf("Unassigned mem read " TARGET_FMT_plx "\n", addr);
2416e18231a3Sblueswir1 #endif
24170a6f8a6dSedgar_igl #if defined(TARGET_SPARC)
2418e18231a3Sblueswir1     do_unassigned_access(addr, 0, 0, 0, 2);
2419e18231a3Sblueswir1 #endif
2420e18231a3Sblueswir1     return 0;
2421e18231a3Sblueswir1 }
2422e18231a3Sblueswir1 
2423e18231a3Sblueswir1 static uint32_t unassigned_mem_readl(void *opaque, target_phys_addr_t addr)
2424e18231a3Sblueswir1 {
2425e18231a3Sblueswir1 #ifdef DEBUG_UNASSIGNED
2426e18231a3Sblueswir1     printf("Unassigned mem read " TARGET_FMT_plx "\n", addr);
2427e18231a3Sblueswir1 #endif
24280a6f8a6dSedgar_igl #if defined(TARGET_SPARC)
2429e18231a3Sblueswir1     do_unassigned_access(addr, 0, 0, 0, 4);
2430b4f0a316Sblueswir1 #endif
243133417e70Sbellard     return 0;
243233417e70Sbellard }
243333417e70Sbellard 
2434a4193c8aSbellard static void unassigned_mem_writeb(void *opaque, target_phys_addr_t addr, uint32_t val)
243533417e70Sbellard {
243667d3b957Spbrook #ifdef DEBUG_UNASSIGNED
2437ab3d1727Sblueswir1     printf("Unassigned mem write " TARGET_FMT_plx " = 0x%x\n", addr, val);
243867d3b957Spbrook #endif
24390a6f8a6dSedgar_igl #if defined(TARGET_SPARC)
2440e18231a3Sblueswir1     do_unassigned_access(addr, 1, 0, 0, 1);
2441e18231a3Sblueswir1 #endif
2442e18231a3Sblueswir1 }
2443e18231a3Sblueswir1 
2444e18231a3Sblueswir1 static void unassigned_mem_writew(void *opaque, target_phys_addr_t addr, uint32_t val)
2445e18231a3Sblueswir1 {
2446e18231a3Sblueswir1 #ifdef DEBUG_UNASSIGNED
2447e18231a3Sblueswir1     printf("Unassigned mem write " TARGET_FMT_plx " = 0x%x\n", addr, val);
2448e18231a3Sblueswir1 #endif
24490a6f8a6dSedgar_igl #if defined(TARGET_SPARC)
2450e18231a3Sblueswir1     do_unassigned_access(addr, 1, 0, 0, 2);
2451e18231a3Sblueswir1 #endif
2452e18231a3Sblueswir1 }
2453e18231a3Sblueswir1 
2454e18231a3Sblueswir1 static void unassigned_mem_writel(void *opaque, target_phys_addr_t addr, uint32_t val)
2455e18231a3Sblueswir1 {
2456e18231a3Sblueswir1 #ifdef DEBUG_UNASSIGNED
2457e18231a3Sblueswir1     printf("Unassigned mem write " TARGET_FMT_plx " = 0x%x\n", addr, val);
2458e18231a3Sblueswir1 #endif
24590a6f8a6dSedgar_igl #if defined(TARGET_SPARC)
2460e18231a3Sblueswir1     do_unassigned_access(addr, 1, 0, 0, 4);
2461b4f0a316Sblueswir1 #endif
246233417e70Sbellard }
246333417e70Sbellard 
246433417e70Sbellard static CPUReadMemoryFunc *unassigned_mem_read[3] = {
246533417e70Sbellard     unassigned_mem_readb,
2466e18231a3Sblueswir1     unassigned_mem_readw,
2467e18231a3Sblueswir1     unassigned_mem_readl,
246833417e70Sbellard };
246933417e70Sbellard 
247033417e70Sbellard static CPUWriteMemoryFunc *unassigned_mem_write[3] = {
247133417e70Sbellard     unassigned_mem_writeb,
2472e18231a3Sblueswir1     unassigned_mem_writew,
2473e18231a3Sblueswir1     unassigned_mem_writel,
247433417e70Sbellard };
247533417e70Sbellard 
24760f459d16Spbrook static void notdirty_mem_writeb(void *opaque, target_phys_addr_t ram_addr,
24770f459d16Spbrook                                 uint32_t val)
24781ccde1cbSbellard {
24793a7d929eSbellard     int dirty_flags;
24803a7d929eSbellard     dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
24813a7d929eSbellard     if (!(dirty_flags & CODE_DIRTY_FLAG)) {
24823a7d929eSbellard #if !defined(CONFIG_USER_ONLY)
24833a7d929eSbellard         tb_invalidate_phys_page_fast(ram_addr, 1);
24843a7d929eSbellard         dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
24853a7d929eSbellard #endif
24863a7d929eSbellard     }
24870f459d16Spbrook     stb_p(phys_ram_base + ram_addr, val);
2488f32fc648Sbellard #ifdef USE_KQEMU
2489f32fc648Sbellard     if (cpu_single_env->kqemu_enabled &&
2490f32fc648Sbellard         (dirty_flags & KQEMU_MODIFY_PAGE_MASK) != KQEMU_MODIFY_PAGE_MASK)
2491f32fc648Sbellard         kqemu_modify_page(cpu_single_env, ram_addr);
2492f32fc648Sbellard #endif
2493f23db169Sbellard     dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
2494f23db169Sbellard     phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] = dirty_flags;
2495f23db169Sbellard     /* we remove the notdirty callback only if the code has been
2496f23db169Sbellard        flushed */
2497f23db169Sbellard     if (dirty_flags == 0xff)
24982e70f6efSpbrook         tlb_set_dirty(cpu_single_env, cpu_single_env->mem_io_vaddr);
24991ccde1cbSbellard }
25001ccde1cbSbellard 
25010f459d16Spbrook static void notdirty_mem_writew(void *opaque, target_phys_addr_t ram_addr,
25020f459d16Spbrook                                 uint32_t val)
25031ccde1cbSbellard {
25043a7d929eSbellard     int dirty_flags;
25053a7d929eSbellard     dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
25063a7d929eSbellard     if (!(dirty_flags & CODE_DIRTY_FLAG)) {
25073a7d929eSbellard #if !defined(CONFIG_USER_ONLY)
25083a7d929eSbellard         tb_invalidate_phys_page_fast(ram_addr, 2);
25093a7d929eSbellard         dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
25103a7d929eSbellard #endif
25113a7d929eSbellard     }
25120f459d16Spbrook     stw_p(phys_ram_base + ram_addr, val);
2513f32fc648Sbellard #ifdef USE_KQEMU
2514f32fc648Sbellard     if (cpu_single_env->kqemu_enabled &&
2515f32fc648Sbellard         (dirty_flags & KQEMU_MODIFY_PAGE_MASK) != KQEMU_MODIFY_PAGE_MASK)
2516f32fc648Sbellard         kqemu_modify_page(cpu_single_env, ram_addr);
2517f32fc648Sbellard #endif
2518f23db169Sbellard     dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
2519f23db169Sbellard     phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] = dirty_flags;
2520f23db169Sbellard     /* we remove the notdirty callback only if the code has been
2521f23db169Sbellard        flushed */
2522f23db169Sbellard     if (dirty_flags == 0xff)
25232e70f6efSpbrook         tlb_set_dirty(cpu_single_env, cpu_single_env->mem_io_vaddr);
25241ccde1cbSbellard }
25251ccde1cbSbellard 
/* 32-bit store handler for RAM pages mapped IO_MEM_NOTDIRTY; see
   notdirty_mem_writew for the mechanism (here 4 bytes are invalidated
   and stored with stl_p). */
static void notdirty_mem_writel(void *opaque, target_phys_addr_t ram_addr,
                                uint32_t val)
{
    int dirty_flags;
    dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
    if (!(dirty_flags & CODE_DIRTY_FLAG)) {
#if !defined(CONFIG_USER_ONLY)
        /* translated code may live here: invalidate the 4 bytes being
           overwritten, then reload the flags it may have updated */
        tb_invalidate_phys_page_fast(ram_addr, 4);
        dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
#endif
    }
    stl_p(phys_ram_base + ram_addr, val);
#ifdef USE_KQEMU
    if (cpu_single_env->kqemu_enabled &&
        (dirty_flags & KQEMU_MODIFY_PAGE_MASK) != KQEMU_MODIFY_PAGE_MASK)
        kqemu_modify_page(cpu_single_env, ram_addr);
#endif
    dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
    phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] = dirty_flags;
    /* we remove the notdirty callback only if the code has been
       flushed */
    if (dirty_flags == 0xff)
        tlb_set_dirty(cpu_single_env, cpu_single_env->mem_io_vaddr);
}
25501ccde1cbSbellard 
/* Read table for slots whose reads are served directly from RAM and
   therefore never reach an io_mem handler (used for IO_MEM_ROM and
   IO_MEM_NOTDIRTY in io_mem_init). */
static CPUReadMemoryFunc *error_mem_read[3] = {
    NULL, /* never used */
    NULL, /* never used */
    NULL, /* never used */
};

/* byte/word/long write dispatch for the notdirty slow path */
static CPUWriteMemoryFunc *notdirty_mem_write[3] = {
    notdirty_mem_writeb,
    notdirty_mem_writew,
    notdirty_mem_writel,
};
25621ccde1cbSbellard 
/* Generate a debug exception if a watchpoint has been hit.
 * offset is the page offset of the access (combined with
 * env->mem_io_vaddr to rebuild the virtual address), len_mask aligns
 * the address to the access size, and flags selects BP_MEM_READ /
 * BP_MEM_WRITE.  On a hit this does not return normally: it longjmps
 * back via cpu_resume_from_signal(). */
static void check_watchpoint(int offset, int len_mask, int flags)
{
    CPUState *env = cpu_single_env;
    target_ulong pc, cs_base;
    TranslationBlock *tb;
    target_ulong vaddr;
    CPUWatchpoint *wp;
    int cpu_flags;

    if (env->watchpoint_hit) {
        /* We re-entered the check after replacing the TB. Now raise
         * the debug interrupt so that it will trigger after the
         * current instruction. */
        cpu_interrupt(env, CPU_INTERRUPT_DEBUG);
        return;
    }
    vaddr = (env->mem_io_vaddr & TARGET_PAGE_MASK) + offset;
    TAILQ_FOREACH(wp, &env->watchpoints, entry) {
        /* either endpoint check covers accesses that straddle the
           watchpoint on one side or the other */
        if ((vaddr == (wp->vaddr & len_mask) ||
             (vaddr & wp->len_mask) == wp->vaddr) && (wp->flags & flags)) {
            wp->flags |= BP_WATCHPOINT_HIT;
            if (!env->watchpoint_hit) {
                env->watchpoint_hit = wp;
                tb = tb_find_pc(env->mem_io_pc);
                if (!tb) {
                    cpu_abort(env, "check_watchpoint: could not find TB for "
                              "pc=%p", (void *)env->mem_io_pc);
                }
                /* roll CPU state back to the faulting instruction and
                   drop the TB so execution can be retargeted */
                cpu_restore_state(tb, env, env->mem_io_pc, NULL);
                tb_phys_invalidate(tb, -1);
                if (wp->flags & BP_STOP_BEFORE_ACCESS) {
                    env->exception_index = EXCP_DEBUG;
                } else {
                    /* regenerate a single-instruction TB so the access
                       completes before the debug interrupt fires */
                    cpu_get_tb_cpu_state(env, &pc, &cs_base, &cpu_flags);
                    tb_gen_code(env, pc, cs_base, cpu_flags, 1);
                }
                cpu_resume_from_signal(env, NULL);
            }
        } else {
            wp->flags &= ~BP_WATCHPOINT_HIT;
        }
    }
}
26070f459d16Spbrook 
/* Watchpoint access routines.  Watchpoints are inserted using TLB tricks,
   so these check for a hit then pass through to the normal out-of-line
   phys routines.  The second argument is the alignment mask for the
   access size (~0x0 = byte, ~0x1 = word, ~0x3 = long). */
static uint32_t watch_mem_readb(void *opaque, target_phys_addr_t addr)
{
    check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x0, BP_MEM_READ);
    return ldub_phys(addr);
}

static uint32_t watch_mem_readw(void *opaque, target_phys_addr_t addr)
{
    check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x1, BP_MEM_READ);
    return lduw_phys(addr);
}

static uint32_t watch_mem_readl(void *opaque, target_phys_addr_t addr)
{
    check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x3, BP_MEM_READ);
    return ldl_phys(addr);
}
26286658ffb8Spbrook 
/* Write-side counterparts of watch_mem_read*: check for a write
   watchpoint hit, then forward to the normal phys store routines. */
static void watch_mem_writeb(void *opaque, target_phys_addr_t addr,
                             uint32_t val)
{
    check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x0, BP_MEM_WRITE);
    stb_phys(addr, val);
}

static void watch_mem_writew(void *opaque, target_phys_addr_t addr,
                             uint32_t val)
{
    check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x1, BP_MEM_WRITE);
    stw_phys(addr, val);
}

static void watch_mem_writel(void *opaque, target_phys_addr_t addr,
                             uint32_t val)
{
    check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x3, BP_MEM_WRITE);
    stl_phys(addr, val);
}
26496658ffb8Spbrook 
/* byte/word/long dispatch tables for the watchpoint handlers above */
static CPUReadMemoryFunc *watch_mem_read[3] = {
    watch_mem_readb,
    watch_mem_readw,
    watch_mem_readl,
};

static CPUWriteMemoryFunc *watch_mem_write[3] = {
    watch_mem_writeb,
    watch_mem_writew,
    watch_mem_writel,
};
26616658ffb8Spbrook 
/* Forward a read of size 2^len bytes to the handler registered for the
   sub-page chunk containing addr (len: 0 = byte, 1 = word, 2 = long).
   Index [0] into opaque/region_offset selects the read direction. */
static inline uint32_t subpage_readlen (subpage_t *mmio, target_phys_addr_t addr,
                                 unsigned int len)
{
    uint32_t ret;
    unsigned int idx;

    idx = SUBPAGE_IDX(addr);
#if defined(DEBUG_SUBPAGE)
    printf("%s: subpage %p len %d addr " TARGET_FMT_plx " idx %d\n", __func__,
           mmio, len, addr, idx);
#endif
    ret = (**mmio->mem_read[idx][len])(mmio->opaque[idx][0][len],
                                       addr + mmio->region_offset[idx][0][len]);

    return ret;
}
2678db7b5426Sblueswir1 
/* Forward a write of size 2^len bytes to the handler registered for the
   sub-page chunk containing addr.  Index [1] into opaque/region_offset
   selects the write direction. */
static inline void subpage_writelen (subpage_t *mmio, target_phys_addr_t addr,
                              uint32_t value, unsigned int len)
{
    unsigned int idx;

    idx = SUBPAGE_IDX(addr);
#if defined(DEBUG_SUBPAGE)
    printf("%s: subpage %p len %d addr " TARGET_FMT_plx " idx %d value %08x\n", __func__,
           mmio, len, addr, idx, value);
#endif
    (**mmio->mem_write[idx][len])(mmio->opaque[idx][1][len],
                                  addr + mmio->region_offset[idx][1][len],
                                  value);
}
2693db7b5426Sblueswir1 
/* Thin per-size wrappers registered as the io_mem callbacks of a
   subpage container; each simply forwards to subpage_read/writelen
   with the matching size code (0 = byte, 1 = word, 2 = long). */
static uint32_t subpage_readb (void *opaque, target_phys_addr_t addr)
{
#if defined(DEBUG_SUBPAGE)
    printf("%s: addr " TARGET_FMT_plx "\n", __func__, addr);
#endif

    return subpage_readlen(opaque, addr, 0);
}

static void subpage_writeb (void *opaque, target_phys_addr_t addr,
                            uint32_t value)
{
#if defined(DEBUG_SUBPAGE)
    printf("%s: addr " TARGET_FMT_plx " val %08x\n", __func__, addr, value);
#endif
    subpage_writelen(opaque, addr, value, 0);
}

static uint32_t subpage_readw (void *opaque, target_phys_addr_t addr)
{
#if defined(DEBUG_SUBPAGE)
    printf("%s: addr " TARGET_FMT_plx "\n", __func__, addr);
#endif

    return subpage_readlen(opaque, addr, 1);
}

static void subpage_writew (void *opaque, target_phys_addr_t addr,
                            uint32_t value)
{
#if defined(DEBUG_SUBPAGE)
    printf("%s: addr " TARGET_FMT_plx " val %08x\n", __func__, addr, value);
#endif
    subpage_writelen(opaque, addr, value, 1);
}

static uint32_t subpage_readl (void *opaque, target_phys_addr_t addr)
{
#if defined(DEBUG_SUBPAGE)
    printf("%s: addr " TARGET_FMT_plx "\n", __func__, addr);
#endif

    return subpage_readlen(opaque, addr, 2);
}

static void subpage_writel (void *opaque,
                         target_phys_addr_t addr, uint32_t value)
{
#if defined(DEBUG_SUBPAGE)
    printf("%s: addr " TARGET_FMT_plx " val %08x\n", __func__, addr, value);
#endif
    subpage_writelen(opaque, addr, value, 2);
}
2747db7b5426Sblueswir1 
2748db7b5426Sblueswir1 static CPUReadMemoryFunc *subpage_read[] = {
2749db7b5426Sblueswir1     &subpage_readb,
2750db7b5426Sblueswir1     &subpage_readw,
2751db7b5426Sblueswir1     &subpage_readl,
2752db7b5426Sblueswir1 };
2753db7b5426Sblueswir1 
2754db7b5426Sblueswir1 static CPUWriteMemoryFunc *subpage_write[] = {
2755db7b5426Sblueswir1     &subpage_writeb,
2756db7b5426Sblueswir1     &subpage_writew,
2757db7b5426Sblueswir1     &subpage_writel,
2758db7b5426Sblueswir1 };
2759db7b5426Sblueswir1 
/* Bind io_mem handlers for the sub-page chunks covering [start, end]
   (page offsets, inclusive) to the io_mem entry 'memory', recording
   region_offset for address translation.  Returns 0 on success, -1 if
   the range falls outside the page. */
static int subpage_register (subpage_t *mmio, uint32_t start, uint32_t end,
                             ram_addr_t memory, ram_addr_t region_offset)
{
    int idx, eidx;
    unsigned int i;

    if (start >= TARGET_PAGE_SIZE || end >= TARGET_PAGE_SIZE)
        return -1;
    idx = SUBPAGE_IDX(start);
    eidx = SUBPAGE_IDX(end);
#if defined(DEBUG_SUBPAGE)
    printf("%s: %p start %08x end %08x idx %08x eidx %08x mem %d\n", __func__,
           mmio, start, end, idx, eidx, memory);
#endif
    memory >>= IO_MEM_SHIFT;
    for (; idx <= eidx; idx++) {
        /* copy only the access sizes the target entry implements;
           NOTE(review): 4 sizes iterated here vs. 3 in the io_mem
           tables elsewhere — presumably subpage_t carries a fourth
           slot; confirm against the subpage_t declaration. */
        for (i = 0; i < 4; i++) {
            if (io_mem_read[memory][i]) {
                mmio->mem_read[idx][i] = &io_mem_read[memory][i];
                mmio->opaque[idx][0][i] = io_mem_opaque[memory];
                mmio->region_offset[idx][0][i] = region_offset;
            }
            if (io_mem_write[memory][i]) {
                mmio->mem_write[idx][i] = &io_mem_write[memory][i];
                mmio->opaque[idx][1][i] = io_mem_opaque[memory];
                mmio->region_offset[idx][1][i] = region_offset;
            }
        }
    }

    return 0;
}
2792db7b5426Sblueswir1 
/* Allocate a subpage container for the page at 'base', register it as a
   new io_mem entry, point *phys at that entry (tagged IO_MEM_SUBPAGE),
   and initialize every chunk to forward to orig_memory/region_offset.
   Returns the container so callers can register finer-grained ranges. */
static void *subpage_init (target_phys_addr_t base, ram_addr_t *phys,
                           ram_addr_t orig_memory, ram_addr_t region_offset)
{
    subpage_t *mmio;
    int subpage_memory;

    mmio = qemu_mallocz(sizeof(subpage_t));

    mmio->base = base;
    subpage_memory = cpu_register_io_memory(0, subpage_read, subpage_write, mmio);
#if defined(DEBUG_SUBPAGE)
    printf("%s: %p base " TARGET_FMT_plx " len %08x %d\n", __func__,
           mmio, base, TARGET_PAGE_SIZE, subpage_memory);
#endif
    *phys = subpage_memory | IO_MEM_SUBPAGE;
    /* whole page initially maps to the original backing; cannot fail
       since the range is exactly one page */
    subpage_register(mmio, 0, TARGET_PAGE_SIZE - 1, orig_memory,
                         region_offset);

    return mmio;
}
2813db7b5426Sblueswir1 
281488715657Saliguori static int get_free_io_mem_idx(void)
281588715657Saliguori {
281688715657Saliguori     int i;
281788715657Saliguori 
281888715657Saliguori     for (i = 0; i<IO_MEM_NB_ENTRIES; i++)
281988715657Saliguori         if (!io_mem_used[i]) {
282088715657Saliguori             io_mem_used[i] = 1;
282188715657Saliguori             return i;
282288715657Saliguori         }
282388715657Saliguori 
282488715657Saliguori     return -1;
282588715657Saliguori }
282688715657Saliguori 
/* One-time setup of the io_mem tables: install the fixed handlers for
   ROM, unassigned and not-dirty accesses, reserve the low slots,
   register the watchpoint handlers, and allocate the RAM dirty bitmap
   (one flag byte per target page, initially all-dirty). */
static void io_mem_init(void)
{
    int i;

    cpu_register_io_memory(IO_MEM_ROM >> IO_MEM_SHIFT, error_mem_read, unassigned_mem_write, NULL);
    cpu_register_io_memory(IO_MEM_UNASSIGNED >> IO_MEM_SHIFT, unassigned_mem_read, unassigned_mem_write, NULL);
    cpu_register_io_memory(IO_MEM_NOTDIRTY >> IO_MEM_SHIFT, error_mem_read, notdirty_mem_write, NULL);
    /* mark the first slots as taken so get_free_io_mem_idx() never
       hands them out; NOTE(review): 5 are reserved though only 3 are
       registered above — presumably RAM and one more fixed entry live
       there; confirm against the IO_MEM_* constants. */
    for (i=0; i<5; i++)
        io_mem_used[i] = 1;

    io_mem_watch = cpu_register_io_memory(0, watch_mem_read,
                                          watch_mem_write, NULL);
    /* alloc dirty bits array */
    phys_ram_dirty = qemu_vmalloc(phys_ram_size >> TARGET_PAGE_BITS);
    memset(phys_ram_dirty, 0xff, phys_ram_size >> TARGET_PAGE_BITS);
}
284333417e70Sbellard 
/* mem_read and mem_write are arrays of functions containing the
   function to access byte (index 0), word (index 1) and dword (index
   2). Functions can be omitted with a NULL function pointer. The
   registered functions may be modified dynamically later.
   If io_index is non zero, the corresponding io zone is
   modified. If it is zero, a new io zone is allocated. The return
   value can be used with cpu_register_physical_memory(). (-1) is
   returned if error (no free slot, or io_index out of range). */
int cpu_register_io_memory(int io_index,
                           CPUReadMemoryFunc **mem_read,
                           CPUWriteMemoryFunc **mem_write,
                           void *opaque)
{
    int i, subwidth = 0;

    if (io_index <= 0) {
        io_index = get_free_io_mem_idx();
        if (io_index == -1)
            return io_index;
    } else {
        if (io_index >= IO_MEM_NB_ENTRIES)
            return -1;
    }

    for(i = 0;i < 3; i++) {
        /* any missing access size forces the slow subwidth path so the
           caller's returned token carries IO_MEM_SUBWIDTH */
        if (!mem_read[i] || !mem_write[i])
            subwidth = IO_MEM_SUBWIDTH;
        io_mem_read[io_index][i] = mem_read[i];
        io_mem_write[io_index][i] = mem_write[i];
    }
    io_mem_opaque[io_index] = opaque;
    /* encode slot index plus subwidth flag into one token */
    return (io_index << IO_MEM_SHIFT) | subwidth;
}
287761382a50Sbellard 
/* Release an io_mem slot previously returned by cpu_register_io_memory.
   io_table_address is the encoded token (index << IO_MEM_SHIFT); the
   handlers are reset to the unassigned defaults and the slot is made
   available for reuse. */
void cpu_unregister_io_memory(int io_table_address)
{
    int i;
    int io_index = io_table_address >> IO_MEM_SHIFT;

    for (i=0;i < 3; i++) {
        io_mem_read[io_index][i] = unassigned_mem_read[i];
        io_mem_write[io_index][i] = unassigned_mem_write[i];
    }
    io_mem_opaque[io_index] = NULL;
    io_mem_used[io_index] = 0;
}
289088715657Saliguori 
/* Accessors for the raw handler tables of an io_mem token; the token's
   low IO_MEM_SHIFT bits are stripped to recover the slot index. */
CPUWriteMemoryFunc **cpu_get_io_memory_write(int io_index)
{
    return io_mem_write[io_index >> IO_MEM_SHIFT];
}

CPUReadMemoryFunc **cpu_get_io_memory_read(int io_index)
{
    return io_mem_read[io_index >> IO_MEM_SHIFT];
}
29008926b517Sbellard 
2901e2eef170Spbrook #endif /* !defined(CONFIG_USER_ONLY) */
2902e2eef170Spbrook 
290313eb76e0Sbellard /* physical memory access (slow version, mainly for debug) */
290413eb76e0Sbellard #if defined(CONFIG_USER_ONLY)
/* User-mode variant: copy len bytes between buf and guest memory at
   addr, page by page, honouring the per-page PAGE_VALID/READ/WRITE
   flags.  Silently stops (partial transfer) on an invalid or
   inaccessible page — callers get no error indication. */
void cpu_physical_memory_rw(target_phys_addr_t addr, uint8_t *buf,
                            int len, int is_write)
{
    int l, flags;
    target_ulong page;
    void * p;

    while (len > 0) {
        page = addr & TARGET_PAGE_MASK;
        /* clamp the chunk to the end of the current page */
        l = (page + TARGET_PAGE_SIZE) - addr;
        if (l > len)
            l = len;
        flags = page_get_flags(page);
        if (!(flags & PAGE_VALID))
            return;
        if (is_write) {
            if (!(flags & PAGE_WRITE))
                return;
            /* XXX: this code should not depend on lock_user */
            if (!(p = lock_user(VERIFY_WRITE, addr, l, 0)))
                /* FIXME - should this return an error rather than just fail? */
                return;
            memcpy(p, buf, l);
            unlock_user(p, addr, l);
        } else {
            if (!(flags & PAGE_READ))
                return;
            /* XXX: this code should not depend on lock_user */
            if (!(p = lock_user(VERIFY_READ, addr, l, 1)))
                /* FIXME - should this return an error rather than just fail? */
                return;
            memcpy(buf, p, l);
            unlock_user(p, addr, 0);
        }
        len -= l;
        buf += l;
        addr += l;
    }
}
29448df1cd07Sbellard 
294513eb76e0Sbellard #else
/* System-mode variant: copy len bytes between buf and guest physical
   memory at addr.  RAM pages are memcpy'd directly (with TB
   invalidation and dirty-bit update on write); MMIO pages are accessed
   through the registered io_mem handlers, broken into the widest
   naturally aligned 4/2/1-byte accesses. */
void cpu_physical_memory_rw(target_phys_addr_t addr, uint8_t *buf,
                            int len, int is_write)
{
    int l, io_index;
    uint8_t *ptr;
    uint32_t val;
    target_phys_addr_t page;
    unsigned long pd;
    PhysPageDesc *p;

    while (len > 0) {
        page = addr & TARGET_PAGE_MASK;
        /* clamp the chunk to the end of the current page */
        l = (page + TARGET_PAGE_SIZE) - addr;
        if (l > len)
            l = len;
        p = phys_page_find(page >> TARGET_PAGE_BITS);
        if (!p) {
            pd = IO_MEM_UNASSIGNED;
        } else {
            pd = p->phys_offset;
        }

        if (is_write) {
            if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
                target_phys_addr_t addr1 = addr;
                io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
                if (p)
                    addr1 = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
                /* XXX: could force cpu_single_env to NULL to avoid
                   potential bugs */
                if (l >= 4 && ((addr1 & 3) == 0)) {
                    /* 32 bit write access */
                    val = ldl_p(buf);
                    io_mem_write[io_index][2](io_mem_opaque[io_index], addr1, val);
                    l = 4;
                } else if (l >= 2 && ((addr1 & 1) == 0)) {
                    /* 16 bit write access */
                    val = lduw_p(buf);
                    io_mem_write[io_index][1](io_mem_opaque[io_index], addr1, val);
                    l = 2;
                } else {
                    /* 8 bit write access */
                    val = ldub_p(buf);
                    io_mem_write[io_index][0](io_mem_opaque[io_index], addr1, val);
                    l = 1;
                }
            } else {
                unsigned long addr1;
                addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
                /* RAM case */
                ptr = phys_ram_base + addr1;
                memcpy(ptr, buf, l);
                if (!cpu_physical_memory_is_dirty(addr1)) {
                    /* invalidate code */
                    tb_invalidate_phys_page_range(addr1, addr1 + l, 0);
                    /* set dirty bit */
                    phys_ram_dirty[addr1 >> TARGET_PAGE_BITS] |=
                        (0xff & ~CODE_DIRTY_FLAG);
                }
            }
        } else {
            /* anything above IO_MEM_ROM without the ROMD flag is true
               MMIO; ROM/ROMD pages are readable as plain memory */
            if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
                !(pd & IO_MEM_ROMD)) {
                target_phys_addr_t addr1 = addr;
                /* I/O case */
                io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
                if (p)
                    addr1 = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
                if (l >= 4 && ((addr1 & 3) == 0)) {
                    /* 32 bit read access */
                    val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr1);
                    stl_p(buf, val);
                    l = 4;
                } else if (l >= 2 && ((addr1 & 1) == 0)) {
                    /* 16 bit read access */
                    val = io_mem_read[io_index][1](io_mem_opaque[io_index], addr1);
                    stw_p(buf, val);
                    l = 2;
                } else {
                    /* 8 bit read access */
                    val = io_mem_read[io_index][0](io_mem_opaque[io_index], addr1);
                    stb_p(buf, val);
                    l = 1;
                }
            } else {
                /* RAM case */
                ptr = phys_ram_base + (pd & TARGET_PAGE_MASK) +
                    (addr & ~TARGET_PAGE_MASK);
                memcpy(buf, ptr, l);
            }
        }
        len -= l;
        buf += l;
        addr += l;
    }
}
30428df1cd07Sbellard 
/* used for ROM loading : can write in RAM and ROM.  Bypasses the MMIO
   write handlers and the dirty-bit/TB-invalidation machinery entirely;
   writes to pages that are neither RAM, ROM nor ROMD are silently
   discarded. */
void cpu_physical_memory_write_rom(target_phys_addr_t addr,
                                   const uint8_t *buf, int len)
{
    int l;
    uint8_t *ptr;
    target_phys_addr_t page;
    unsigned long pd;
    PhysPageDesc *p;

    while (len > 0) {
        page = addr & TARGET_PAGE_MASK;
        /* clamp the chunk to the end of the current page */
        l = (page + TARGET_PAGE_SIZE) - addr;
        if (l > len)
            l = len;
        p = phys_page_find(page >> TARGET_PAGE_BITS);
        if (!p) {
            pd = IO_MEM_UNASSIGNED;
        } else {
            pd = p->phys_offset;
        }

        if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM &&
            (pd & ~TARGET_PAGE_MASK) != IO_MEM_ROM &&
            !(pd & IO_MEM_ROMD)) {
            /* do nothing */
        } else {
            unsigned long addr1;
            addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
            /* ROM/RAM case */
            ptr = phys_ram_base + addr1;
            memcpy(ptr, buf, l);
        }
        len -= l;
        buf += l;
        addr += l;
    }
}
3081d0ecd2aaSbellard 
/* Bounce buffer state for cpu_physical_memory_map().  NOTE(review):
   a single static instance exists, so presumably only one bounced
   mapping can be outstanding at a time — confirm against the map/unmap
   implementation below. */
typedef struct {
    void *buffer;               /* host buffer, or NULL when free */
    target_phys_addr_t addr;    /* guest address being shadowed */
    target_phys_addr_t len;     /* length of the shadowed region */
} BounceBuffer;

static BounceBuffer bounce;

/* A client waiting to be notified when map resources free up; see
   cpu_register_map_client / cpu_notify_map_clients. */
typedef struct MapClient {
    void *opaque;                   /* passed back to callback */
    void (*callback)(void *opaque); /* invoked on notify, then entry removed */
    LIST_ENTRY(MapClient) link;
} MapClient;

static LIST_HEAD(map_client_list, MapClient) map_client_list
    = LIST_HEAD_INITIALIZER(map_client_list);
3098ba223c29Saliguori 
3099ba223c29Saliguori void *cpu_register_map_client(void *opaque, void (*callback)(void *opaque))
3100ba223c29Saliguori {
3101ba223c29Saliguori     MapClient *client = qemu_malloc(sizeof(*client));
3102ba223c29Saliguori 
3103ba223c29Saliguori     client->opaque = opaque;
3104ba223c29Saliguori     client->callback = callback;
3105ba223c29Saliguori     LIST_INSERT_HEAD(&map_client_list, client, link);
3106ba223c29Saliguori     return client;
3107ba223c29Saliguori }
3108ba223c29Saliguori 
3109ba223c29Saliguori void cpu_unregister_map_client(void *_client)
3110ba223c29Saliguori {
3111ba223c29Saliguori     MapClient *client = (MapClient *)_client;
3112ba223c29Saliguori 
3113ba223c29Saliguori     LIST_REMOVE(client, link);
3114ba223c29Saliguori }
3115ba223c29Saliguori 
3116ba223c29Saliguori static void cpu_notify_map_clients(void)
3117ba223c29Saliguori {
3118ba223c29Saliguori     MapClient *client;
3119ba223c29Saliguori 
3120ba223c29Saliguori     while (!LIST_EMPTY(&map_client_list)) {
3121ba223c29Saliguori         client = LIST_FIRST(&map_client_list);
3122ba223c29Saliguori         client->callback(client->opaque);
3123ba223c29Saliguori         LIST_REMOVE(client, link);
3124ba223c29Saliguori     }
3125ba223c29Saliguori }
3126ba223c29Saliguori 
/* Map a physical memory region into a host virtual address.
 * May map a subset of the requested range, given by and returned in *plen.
 * May return NULL if resources needed to perform the mapping are exhausted.
 * Use only for reads OR writes - not for read-modify-write operations.
 * Use cpu_register_map_client() to know when retrying the map operation is
 * likely to succeed.
 */
void *cpu_physical_memory_map(target_phys_addr_t addr,
                              target_phys_addr_t *plen,
                              int is_write)
{
    target_phys_addr_t len = *plen;
    target_phys_addr_t done = 0;    /* bytes successfully mapped so far */
    int l;
    uint8_t *ret = NULL;            /* host address of the start of the mapping */
    uint8_t *ptr;
    target_phys_addr_t page;
    unsigned long pd;
    PhysPageDesc *p;
    unsigned long addr1;

    while (len > 0) {
        /* Clamp this iteration to the remainder of the current page.  */
        page = addr & TARGET_PAGE_MASK;
        l = (page + TARGET_PAGE_SIZE) - addr;
        if (l > len)
            l = len;
        p = phys_page_find(page >> TARGET_PAGE_BITS);
        if (!p) {
            pd = IO_MEM_UNASSIGNED;
        } else {
            pd = p->phys_offset;
        }

        if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
            /* Not plain RAM: fall back to the single global bounce
               buffer.  That only works as the first (and only) page of
               a mapping, and only if the buffer is not already busy.  */
            if (done || bounce.buffer) {
                break;
            }
            bounce.buffer = qemu_memalign(TARGET_PAGE_SIZE, TARGET_PAGE_SIZE);
            bounce.addr = addr;
            bounce.len = l;
            if (!is_write) {
                /* Read mapping: pre-fill the buffer with guest data.  */
                cpu_physical_memory_rw(addr, bounce.buffer, l, 0);
            }
            ptr = bounce.buffer;
        } else {
            /* RAM: translate directly into the host address space.  */
            addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
            ptr = phys_ram_base + addr1;
        }
        if (!done) {
            ret = ptr;
        } else if (ret + done != ptr) {
            /* Host addresses stopped being contiguous; return the
               shorter mapping made so far.  */
            break;
        }

        len -= l;
        addr += l;
        done += l;
    }
    *plen = done;
    return ret;
}
31886d16c2f8Saliguori 
/* Unmaps a memory region previously mapped by cpu_physical_memory_map().
 * Will also mark the memory as dirty if is_write == 1.  access_len gives
 * the amount of memory that was actually read or written by the caller.
 */
void cpu_physical_memory_unmap(void *buffer, target_phys_addr_t len,
                               int is_write, target_phys_addr_t access_len)
{
    if (buffer != bounce.buffer) {
        /* Direct RAM mapping: nothing to copy back, but written pages
           must have their dirty bits set and any translated code in
           them invalidated.  */
        if (is_write) {
            unsigned long addr1 = (uint8_t *)buffer - phys_ram_base;
            while (access_len) {
                unsigned l;
                l = TARGET_PAGE_SIZE;
                if (l > access_len)
                    l = access_len;
                if (!cpu_physical_memory_is_dirty(addr1)) {
                    /* invalidate code */
                    tb_invalidate_phys_page_range(addr1, addr1 + l, 0);
                    /* set dirty bit */
                    phys_ram_dirty[addr1 >> TARGET_PAGE_BITS] |=
                        (0xff & ~CODE_DIRTY_FLAG);
                }
                addr1 += l;
                access_len -= l;
            }
        }
        return;
    }
    /* Bounce-buffer mapping: flush written data back to the guest,
       release the buffer, and wake anyone waiting to map.  */
    if (is_write) {
        cpu_physical_memory_write(bounce.addr, bounce.buffer, access_len);
    }
    qemu_free(bounce.buffer);
    bounce.buffer = NULL;
    cpu_notify_map_clients();
}
3224d0ecd2aaSbellard 
/* warning: addr must be aligned */
/* Load a 32-bit word from guest physical memory: dispatch to the device
   read handler for I/O pages, read host RAM directly otherwise.  */
uint32_t ldl_phys(target_phys_addr_t addr)
{
    int io_index;
    uint8_t *ptr;
    uint32_t val;
    unsigned long pd;
    PhysPageDesc *p;

    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    if (!p) {
        pd = IO_MEM_UNASSIGNED;
    } else {
        pd = p->phys_offset;
    }

    /* ROM (and ROMD) pages are readable like RAM; only true I/O pages
       go through the handler table.  */
    if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
        !(pd & IO_MEM_ROMD)) {
        /* I/O case */
        io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
        if (p)
            addr = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
        val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr);
    } else {
        /* RAM case */
        ptr = phys_ram_base + (pd & TARGET_PAGE_MASK) +
            (addr & ~TARGET_PAGE_MASK);
        val = ldl_p(ptr);
    }
    return val;
}
32568df1cd07Sbellard 
/* warning: addr must be aligned */
/* Load a 64-bit word from guest physical memory.  */
uint64_t ldq_phys(target_phys_addr_t addr)
{
    int io_index;
    uint8_t *ptr;
    uint64_t val;
    unsigned long pd;
    PhysPageDesc *p;

    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    if (!p) {
        pd = IO_MEM_UNASSIGNED;
    } else {
        pd = p->phys_offset;
    }

    if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
        !(pd & IO_MEM_ROMD)) {
        /* I/O case */
        io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
        if (p)
            addr = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
        /* The handler table only has 8/16/32-bit entries ([2] = 32-bit),
           so split the 64-bit access into two halves in target order.  */
#ifdef TARGET_WORDS_BIGENDIAN
        val = (uint64_t)io_mem_read[io_index][2](io_mem_opaque[io_index], addr) << 32;
        val |= io_mem_read[io_index][2](io_mem_opaque[io_index], addr + 4);
#else
        val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr);
        val |= (uint64_t)io_mem_read[io_index][2](io_mem_opaque[io_index], addr + 4) << 32;
#endif
    } else {
        /* RAM case */
        ptr = phys_ram_base + (pd & TARGET_PAGE_MASK) +
            (addr & ~TARGET_PAGE_MASK);
        val = ldq_p(ptr);
    }
    return val;
}
329484b7b8e7Sbellard 
3295aab33094Sbellard /* XXX: optimize */
3296aab33094Sbellard uint32_t ldub_phys(target_phys_addr_t addr)
3297aab33094Sbellard {
3298aab33094Sbellard     uint8_t val;
3299aab33094Sbellard     cpu_physical_memory_read(addr, &val, 1);
3300aab33094Sbellard     return val;
3301aab33094Sbellard }
3302aab33094Sbellard 
3303aab33094Sbellard /* XXX: optimize */
3304aab33094Sbellard uint32_t lduw_phys(target_phys_addr_t addr)
3305aab33094Sbellard {
3306aab33094Sbellard     uint16_t val;
3307aab33094Sbellard     cpu_physical_memory_read(addr, (uint8_t *)&val, 2);
3308aab33094Sbellard     return tswap16(val);
3309aab33094Sbellard }
3310aab33094Sbellard 
/* warning: addr must be aligned. The ram page is not masked as dirty
   and the code inside is not invalidated. It is useful if the dirty
   bits are used to track modified PTEs */
void stl_phys_notdirty(target_phys_addr_t addr, uint32_t val)
{
    int io_index;
    uint8_t *ptr;
    unsigned long pd;
    PhysPageDesc *p;

    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    if (!p) {
        pd = IO_MEM_UNASSIGNED;
    } else {
        pd = p->phys_offset;
    }

    if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
        /* Non-RAM page: forward the write to the device handler.  */
        io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
        if (p)
            addr = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
        io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
    } else {
        unsigned long addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
        ptr = phys_ram_base + addr1;
        stl_p(ptr, val);

        /* During migration the dirty bitmap must still track every RAM
           write, so the "notdirty" shortcut is suppressed here.  */
        if (unlikely(in_migration)) {
            if (!cpu_physical_memory_is_dirty(addr1)) {
                /* invalidate code */
                tb_invalidate_phys_page_range(addr1, addr1 + 4, 0);
                /* set dirty bit */
                phys_ram_dirty[addr1 >> TARGET_PAGE_BITS] |=
                    (0xff & ~CODE_DIRTY_FLAG);
            }
        }
    }
}
33498df1cd07Sbellard 
/* 64-bit variant of stl_phys_notdirty(): store to guest physical memory
   without setting dirty bits or invalidating translated code.
   addr must be aligned.  */
void stq_phys_notdirty(target_phys_addr_t addr, uint64_t val)
{
    int io_index;
    uint8_t *ptr;
    unsigned long pd;
    PhysPageDesc *p;

    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    if (!p) {
        pd = IO_MEM_UNASSIGNED;
    } else {
        pd = p->phys_offset;
    }

    if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
        io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
        if (p)
            addr = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
        /* Only 8/16/32-bit handlers exist ([2] = 32-bit), so issue the
           64-bit store as two 32-bit writes in target byte order.  */
#ifdef TARGET_WORDS_BIGENDIAN
        io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val >> 32);
        io_mem_write[io_index][2](io_mem_opaque[io_index], addr + 4, val);
#else
        io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
        io_mem_write[io_index][2](io_mem_opaque[io_index], addr + 4, val >> 32);
#endif
    } else {
        ptr = phys_ram_base + (pd & TARGET_PAGE_MASK) +
            (addr & ~TARGET_PAGE_MASK);
        stq_p(ptr, val);
    }
}
3381bc98a7efSj_mayer 
/* warning: addr must be aligned */
/* Store a 32-bit word to guest physical memory.  Unlike
   stl_phys_notdirty(), a RAM store always updates the dirty bitmap and
   invalidates any translated code in the page.  */
void stl_phys(target_phys_addr_t addr, uint32_t val)
{
    int io_index;
    uint8_t *ptr;
    unsigned long pd;
    PhysPageDesc *p;

    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    if (!p) {
        pd = IO_MEM_UNASSIGNED;
    } else {
        pd = p->phys_offset;
    }

    if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
        /* Non-RAM page: forward the write to the device handler.  */
        io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
        if (p)
            addr = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
        io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
    } else {
        unsigned long addr1;
        addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
        /* RAM case */
        ptr = phys_ram_base + addr1;
        stl_p(ptr, val);
        if (!cpu_physical_memory_is_dirty(addr1)) {
            /* invalidate code */
            tb_invalidate_phys_page_range(addr1, addr1 + 4, 0);
            /* set dirty bit */
            phys_ram_dirty[addr1 >> TARGET_PAGE_BITS] |=
                (0xff & ~CODE_DIRTY_FLAG);
        }
    }
}
34178df1cd07Sbellard 
3418aab33094Sbellard /* XXX: optimize */
3419aab33094Sbellard void stb_phys(target_phys_addr_t addr, uint32_t val)
3420aab33094Sbellard {
3421aab33094Sbellard     uint8_t v = val;
3422aab33094Sbellard     cpu_physical_memory_write(addr, &v, 1);
3423aab33094Sbellard }
3424aab33094Sbellard 
3425aab33094Sbellard /* XXX: optimize */
3426aab33094Sbellard void stw_phys(target_phys_addr_t addr, uint32_t val)
3427aab33094Sbellard {
3428aab33094Sbellard     uint16_t v = tswap16(val);
3429aab33094Sbellard     cpu_physical_memory_write(addr, (const uint8_t *)&v, 2);
3430aab33094Sbellard }
3431aab33094Sbellard 
3432aab33094Sbellard /* XXX: optimize */
3433aab33094Sbellard void stq_phys(target_phys_addr_t addr, uint64_t val)
3434aab33094Sbellard {
3435aab33094Sbellard     val = tswap64(val);
3436aab33094Sbellard     cpu_physical_memory_write(addr, (const uint8_t *)&val, 8);
3437aab33094Sbellard }
3438aab33094Sbellard 
343913eb76e0Sbellard #endif
344013eb76e0Sbellard 
344113eb76e0Sbellard /* virtual memory access for debug */
3442b448f2f3Sbellard int cpu_memory_rw_debug(CPUState *env, target_ulong addr,
3443b448f2f3Sbellard                         uint8_t *buf, int len, int is_write)
344413eb76e0Sbellard {
344513eb76e0Sbellard     int l;
34469b3c35e0Sj_mayer     target_phys_addr_t phys_addr;
34479b3c35e0Sj_mayer     target_ulong page;
344813eb76e0Sbellard 
344913eb76e0Sbellard     while (len > 0) {
345013eb76e0Sbellard         page = addr & TARGET_PAGE_MASK;
345113eb76e0Sbellard         phys_addr = cpu_get_phys_page_debug(env, page);
345213eb76e0Sbellard         /* if no physical page mapped, return an error */
345313eb76e0Sbellard         if (phys_addr == -1)
345413eb76e0Sbellard             return -1;
345513eb76e0Sbellard         l = (page + TARGET_PAGE_SIZE) - addr;
345613eb76e0Sbellard         if (l > len)
345713eb76e0Sbellard             l = len;
3458b448f2f3Sbellard         cpu_physical_memory_rw(phys_addr + (addr & ~TARGET_PAGE_MASK),
3459b448f2f3Sbellard                                buf, l, is_write);
346013eb76e0Sbellard         len -= l;
346113eb76e0Sbellard         buf += l;
346213eb76e0Sbellard         addr += l;
346313eb76e0Sbellard     }
346413eb76e0Sbellard     return 0;
346513eb76e0Sbellard }
346613eb76e0Sbellard 
/* in deterministic execution mode, instructions doing device I/Os
   must be at the end of the TB */
/* Called from an I/O access taken mid-TB under icount: retranslate the
   current block so that it ends exactly on the I/O instruction, then
   restart execution.  retaddr is the host return address inside the
   generated code of the faulting TB.  */
void cpu_io_recompile(CPUState *env, void *retaddr)
{
    TranslationBlock *tb;
    uint32_t n, cflags;
    target_ulong pc, cs_base;
    uint64_t flags;

    tb = tb_find_pc((unsigned long)retaddr);
    if (!tb) {
        cpu_abort(env, "cpu_io_recompile: could not find TB for pc=%p",
                  retaddr);
    }
    n = env->icount_decr.u16.low + tb->icount;
    cpu_restore_state(tb, env, (unsigned long)retaddr, NULL);
    /* Calculate how many instructions had been executed before the fault
       occurred.  */
    n = n - env->icount_decr.u16.low;
    /* Generate a new TB ending on the I/O insn.  */
    n++;
    /* On MIPS and SH, delay slot instructions can only be restarted if
       they were already the first instruction in the TB.  If this is not
       the first instruction in a TB then re-execute the preceding
       branch.  */
#if defined(TARGET_MIPS)
    if ((env->hflags & MIPS_HFLAG_BMASK) != 0 && n > 1) {
        env->active_tc.PC -= 4;
        env->icount_decr.u16.low++;
        env->hflags &= ~MIPS_HFLAG_BMASK;
    }
#elif defined(TARGET_SH4)
    if ((env->flags & ((DELAY_SLOT | DELAY_SLOT_CONDITIONAL))) != 0
            && n > 1) {
        env->pc -= 2;
        env->icount_decr.u16.low++;
        env->flags &= ~(DELAY_SLOT | DELAY_SLOT_CONDITIONAL);
    }
#endif
    /* This should never happen.  */
    if (n > CF_COUNT_MASK)
        cpu_abort(env, "TB too big during recompile");

    /* CF_LAST_IO forces the new TB to stop right after the I/O insn.  */
    cflags = n | CF_LAST_IO;
    pc = tb->pc;
    cs_base = tb->cs_base;
    flags = tb->flags;
    tb_phys_invalidate(tb, -1);
    /* FIXME: In theory this could raise an exception.  In practice
       we have already translated the block once so it's probably ok.  */
    tb_gen_code(env, pc, cs_base, flags, cflags);
    /* TODO: If env->pc != tb->pc (i.e. the faulting instruction was not
       the first in the TB) then we end up generating a whole new TB and
       repeating the fault, which is horribly inefficient.
       Better would be to execute just this insn uncached, or generate a
       second new TB.  */
    cpu_resume_from_signal(env, NULL);
}
35252e70f6efSpbrook 
/* Print translation-buffer and TB statistics (used by the monitor's
   "info jit" command) through the supplied fprintf-like callback.  */
void dump_exec_info(FILE *f,
                    int (*cpu_fprintf)(FILE *f, const char *fmt, ...))
{
    int i, target_code_size, max_target_code_size;
    int direct_jmp_count, direct_jmp2_count, cross_page;
    TranslationBlock *tb;

    /* Accumulate per-TB statistics over all currently valid TBs.  */
    target_code_size = 0;
    max_target_code_size = 0;
    cross_page = 0;
    direct_jmp_count = 0;
    direct_jmp2_count = 0;
    for(i = 0; i < nb_tbs; i++) {
        tb = &tbs[i];
        target_code_size += tb->size;
        if (tb->size > max_target_code_size)
            max_target_code_size = tb->size;
        /* A valid second page address means the TB spans a page boundary.  */
        if (tb->page_addr[1] != -1)
            cross_page++;
        /* 0xffff marks an unpatched jump slot; anything else means the
           TB was directly chained to a successor.  */
        if (tb->tb_next_offset[0] != 0xffff) {
            direct_jmp_count++;
            if (tb->tb_next_offset[1] != 0xffff) {
                direct_jmp2_count++;
            }
        }
    }
    /* XXX: avoid using doubles ? */
    cpu_fprintf(f, "Translation buffer state:\n");
    cpu_fprintf(f, "gen code size       %ld/%ld\n",
                code_gen_ptr - code_gen_buffer, code_gen_buffer_max_size);
    cpu_fprintf(f, "TB count            %d/%d\n",
                nb_tbs, code_gen_max_blocks);
    cpu_fprintf(f, "TB avg target size  %d max=%d bytes\n",
                nb_tbs ? target_code_size / nb_tbs : 0,
                max_target_code_size);
    cpu_fprintf(f, "TB avg host size    %d bytes (expansion ratio: %0.1f)\n",
                nb_tbs ? (code_gen_ptr - code_gen_buffer) / nb_tbs : 0,
                target_code_size ? (double) (code_gen_ptr - code_gen_buffer) / target_code_size : 0);
    cpu_fprintf(f, "cross page TB count %d (%d%%)\n",
            cross_page,
            nb_tbs ? (cross_page * 100) / nb_tbs : 0);
    cpu_fprintf(f, "direct jump count   %d (%d%%) (2 jumps=%d %d%%)\n",
                direct_jmp_count,
                nb_tbs ? (direct_jmp_count * 100) / nb_tbs : 0,
                direct_jmp2_count,
                nb_tbs ? (direct_jmp2_count * 100) / nb_tbs : 0);
    cpu_fprintf(f, "\nStatistics:\n");
    cpu_fprintf(f, "TB flush count      %d\n", tb_flush_count);
    cpu_fprintf(f, "TB invalidate count %d\n", tb_phys_invalidate_count);
    cpu_fprintf(f, "TLB flush count     %d\n", tlb_flush_count);
    tcg_dump_info(f, cpu_fprintf);
}
3578e3db7226Sbellard 
357961382a50Sbellard #if !defined(CONFIG_USER_ONLY)
358061382a50Sbellard 
358161382a50Sbellard #define MMUSUFFIX _cmmu
358261382a50Sbellard #define GETPC() NULL
358361382a50Sbellard #define env cpu_single_env
3584b769d8feSbellard #define SOFTMMU_CODE_ACCESS
358561382a50Sbellard 
358661382a50Sbellard #define SHIFT 0
358761382a50Sbellard #include "softmmu_template.h"
358861382a50Sbellard 
358961382a50Sbellard #define SHIFT 1
359061382a50Sbellard #include "softmmu_template.h"
359161382a50Sbellard 
359261382a50Sbellard #define SHIFT 2
359361382a50Sbellard #include "softmmu_template.h"
359461382a50Sbellard 
359561382a50Sbellard #define SHIFT 3
359661382a50Sbellard #include "softmmu_template.h"
359761382a50Sbellard 
359861382a50Sbellard #undef env
359961382a50Sbellard 
360061382a50Sbellard #endif
3601