xref: /qemu/system/physmem.c (revision 6c2934db949aa259ed47b126b5c6838ac57a3f6f)
154936004Sbellard /*
2fd6ce8f6Sbellard  *  virtual page mapping and translated block handling
354936004Sbellard  *
454936004Sbellard  *  Copyright (c) 2003 Fabrice Bellard
554936004Sbellard  *
654936004Sbellard  * This library is free software; you can redistribute it and/or
754936004Sbellard  * modify it under the terms of the GNU Lesser General Public
854936004Sbellard  * License as published by the Free Software Foundation; either
954936004Sbellard  * version 2 of the License, or (at your option) any later version.
1054936004Sbellard  *
1154936004Sbellard  * This library is distributed in the hope that it will be useful,
1254936004Sbellard  * but WITHOUT ANY WARRANTY; without even the implied warranty of
1354936004Sbellard  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
1454936004Sbellard  * Lesser General Public License for more details.
1554936004Sbellard  *
1654936004Sbellard  * You should have received a copy of the GNU Lesser General Public
1754936004Sbellard  * License along with this library; if not, write to the Free Software
18fad6cb1aSaurel32  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston MA  02110-1301 USA
1954936004Sbellard  */
2067b915a5Sbellard #include "config.h"
21d5a8f07cSbellard #ifdef _WIN32
224fddf62aSths #define WIN32_LEAN_AND_MEAN
23d5a8f07cSbellard #include <windows.h>
24d5a8f07cSbellard #else
25a98d49b1Sbellard #include <sys/types.h>
26d5a8f07cSbellard #include <sys/mman.h>
27d5a8f07cSbellard #endif
2854936004Sbellard #include <stdlib.h>
2954936004Sbellard #include <stdio.h>
3054936004Sbellard #include <stdarg.h>
3154936004Sbellard #include <string.h>
3254936004Sbellard #include <errno.h>
3354936004Sbellard #include <unistd.h>
3454936004Sbellard #include <inttypes.h>
3554936004Sbellard 
366180a181Sbellard #include "cpu.h"
376180a181Sbellard #include "exec-all.h"
38ca10f867Saurel32 #include "qemu-common.h"
39b67d9a52Sbellard #include "tcg.h"
40b3c7724cSpbrook #include "hw/hw.h"
4174576198Saliguori #include "osdep.h"
427ba1e619Saliguori #include "kvm.h"
4353a5960aSpbrook #if defined(CONFIG_USER_ONLY)
4453a5960aSpbrook #include <qemu.h>
4553a5960aSpbrook #endif
4654936004Sbellard 
47fd6ce8f6Sbellard //#define DEBUG_TB_INVALIDATE
4866e85a21Sbellard //#define DEBUG_FLUSH
499fa3e853Sbellard //#define DEBUG_TLB
5067d3b957Spbrook //#define DEBUG_UNASSIGNED
51fd6ce8f6Sbellard 
52fd6ce8f6Sbellard /* make various TB consistency checks */
53fd6ce8f6Sbellard //#define DEBUG_TB_CHECK
5498857888Sbellard //#define DEBUG_TLB_CHECK
55fd6ce8f6Sbellard 
561196be37Sths //#define DEBUG_IOPORT
57db7b5426Sblueswir1 //#define DEBUG_SUBPAGE
581196be37Sths 
5999773bd4Spbrook #if !defined(CONFIG_USER_ONLY)
6099773bd4Spbrook /* TB consistency checks only implemented for usermode emulation.  */
6199773bd4Spbrook #undef DEBUG_TB_CHECK
6299773bd4Spbrook #endif
6399773bd4Spbrook 
649fa3e853Sbellard #define SMC_BITMAP_USE_THRESHOLD 10
659fa3e853Sbellard 
669fa3e853Sbellard #define MMAP_AREA_START        0x00000000
679fa3e853Sbellard #define MMAP_AREA_END          0xa8000000
68fd6ce8f6Sbellard 
69108c49b8Sbellard #if defined(TARGET_SPARC64)
70108c49b8Sbellard #define TARGET_PHYS_ADDR_SPACE_BITS 41
715dcb6b91Sblueswir1 #elif defined(TARGET_SPARC)
725dcb6b91Sblueswir1 #define TARGET_PHYS_ADDR_SPACE_BITS 36
73bedb69eaSj_mayer #elif defined(TARGET_ALPHA)
74bedb69eaSj_mayer #define TARGET_PHYS_ADDR_SPACE_BITS 42
75bedb69eaSj_mayer #define TARGET_VIRT_ADDR_SPACE_BITS 42
76108c49b8Sbellard #elif defined(TARGET_PPC64)
77108c49b8Sbellard #define TARGET_PHYS_ADDR_SPACE_BITS 42
7800f82b8aSaurel32 #elif defined(TARGET_X86_64) && !defined(USE_KQEMU)
7900f82b8aSaurel32 #define TARGET_PHYS_ADDR_SPACE_BITS 42
8000f82b8aSaurel32 #elif defined(TARGET_I386) && !defined(USE_KQEMU)
8100f82b8aSaurel32 #define TARGET_PHYS_ADDR_SPACE_BITS 36
82108c49b8Sbellard #else
83108c49b8Sbellard /* Note: for compatibility with kqemu, we use 32 bits for x86_64 */
84108c49b8Sbellard #define TARGET_PHYS_ADDR_SPACE_BITS 32
85108c49b8Sbellard #endif
86108c49b8Sbellard 
87bdaf78e0Sblueswir1 static TranslationBlock *tbs;
8826a5f13bSbellard int code_gen_max_blocks;
899fa3e853Sbellard TranslationBlock *tb_phys_hash[CODE_GEN_PHYS_HASH_SIZE];
90bdaf78e0Sblueswir1 static int nb_tbs;
91eb51d102Sbellard /* any access to the tbs or the page table must use this lock */
92eb51d102Sbellard spinlock_t tb_lock = SPIN_LOCK_UNLOCKED;
93fd6ce8f6Sbellard 
94141ac468Sblueswir1 #if defined(__arm__) || defined(__sparc_v9__)
95141ac468Sblueswir1 /* The prologue must be reachable with a direct jump. ARM and Sparc64
96141ac468Sblueswir1  have limited branch ranges (possibly also PPC) so place it in a
97d03d860bSblueswir1  section close to code segment. */
98d03d860bSblueswir1 #define code_gen_section                                \
99d03d860bSblueswir1     __attribute__((__section__(".gen_code")))           \
100d03d860bSblueswir1     __attribute__((aligned (32)))
101d03d860bSblueswir1 #else
102d03d860bSblueswir1 #define code_gen_section                                \
103d03d860bSblueswir1     __attribute__((aligned (32)))
104d03d860bSblueswir1 #endif
105d03d860bSblueswir1 
106d03d860bSblueswir1 uint8_t code_gen_prologue[1024] code_gen_section;
107bdaf78e0Sblueswir1 static uint8_t *code_gen_buffer;
108bdaf78e0Sblueswir1 static unsigned long code_gen_buffer_size;
10926a5f13bSbellard /* threshold to flush the translated code buffer */
110bdaf78e0Sblueswir1 static unsigned long code_gen_buffer_max_size;
111fd6ce8f6Sbellard uint8_t *code_gen_ptr;
112fd6ce8f6Sbellard 
113e2eef170Spbrook #if !defined(CONFIG_USER_ONLY)
11400f82b8aSaurel32 ram_addr_t phys_ram_size;
1159fa3e853Sbellard int phys_ram_fd;
1169fa3e853Sbellard uint8_t *phys_ram_base;
1171ccde1cbSbellard uint8_t *phys_ram_dirty;
11874576198Saliguori static int in_migration;
119e9a1ab19Sbellard static ram_addr_t phys_ram_alloc_offset = 0;
120e2eef170Spbrook #endif
1219fa3e853Sbellard 
1226a00d601Sbellard CPUState *first_cpu;
1236a00d601Sbellard /* current CPU in the current thread. It is only valid inside
1246a00d601Sbellard    cpu_exec() */
1256a00d601Sbellard CPUState *cpu_single_env;
1262e70f6efSpbrook /* 0 = Do not count executed instructions.
127bf20dc07Sths    1 = Precise instruction counting.
1282e70f6efSpbrook    2 = Adaptive rate instruction counting.  */
1292e70f6efSpbrook int use_icount = 0;
1302e70f6efSpbrook /* Current instruction counter.  While executing translated code this may
1312e70f6efSpbrook    include some instructions that have not yet been executed.  */
1322e70f6efSpbrook int64_t qemu_icount;
1336a00d601Sbellard 
/* Per (target) page bookkeeping used by the translator. */
typedef struct PageDesc {
    /* list of TBs intersecting this ram page */
    TranslationBlock *first_tb;
    /* in order to optimize self modifying code, we count the number
       of lookups we do to a given page to use a bitmap */
    unsigned int code_write_count;
    uint8_t *code_bitmap;       /* lazily built; see code_write_count above */
#if defined(CONFIG_USER_ONLY)
    unsigned long flags;        /* PAGE_* flags (protection / PAGE_RESERVED) */
#endif
} PageDesc;
14554936004Sbellard 
/* Descriptor of one guest-physical page. */
typedef struct PhysPageDesc {
    /* offset in host memory of the page + io_index in the low bits */
    ram_addr_t phys_offset;
    /* NOTE(review): presumably the page's offset within its owning
       memory region (used by subpage I/O) — confirm against the
       registration code. */
    ram_addr_t region_offset;
} PhysPageDesc;
15192e873b9Sbellard 
15254936004Sbellard #define L2_BITS 10
153bedb69eaSj_mayer #if defined(CONFIG_USER_ONLY) && defined(TARGET_VIRT_ADDR_SPACE_BITS)
154bedb69eaSj_mayer /* XXX: this is a temporary hack for alpha target.
155bedb69eaSj_mayer  *      In the future, this is to be replaced by a multi-level table
156bedb69eaSj_mayer  *      to actually be able to handle the complete 64 bits address space.
157bedb69eaSj_mayer  */
158bedb69eaSj_mayer #define L1_BITS (TARGET_VIRT_ADDR_SPACE_BITS - L2_BITS - TARGET_PAGE_BITS)
159bedb69eaSj_mayer #else
16003875444Saurel32 #define L1_BITS (32 - L2_BITS - TARGET_PAGE_BITS)
161bedb69eaSj_mayer #endif
16254936004Sbellard 
16354936004Sbellard #define L1_SIZE (1 << L1_BITS)
16454936004Sbellard #define L2_SIZE (1 << L2_BITS)
16554936004Sbellard 
16683fb7adfSbellard unsigned long qemu_real_host_page_size;
16783fb7adfSbellard unsigned long qemu_host_page_bits;
16883fb7adfSbellard unsigned long qemu_host_page_size;
16983fb7adfSbellard unsigned long qemu_host_page_mask;
17054936004Sbellard 
17192e873b9Sbellard /* XXX: for system emulation, it could just be an array */
17254936004Sbellard static PageDesc *l1_map[L1_SIZE];
173bdaf78e0Sblueswir1 static PhysPageDesc **l1_phys_map;
17454936004Sbellard 
175e2eef170Spbrook #if !defined(CONFIG_USER_ONLY)
176e2eef170Spbrook static void io_mem_init(void);
177e2eef170Spbrook 
17833417e70Sbellard /* io memory support */
17933417e70Sbellard CPUWriteMemoryFunc *io_mem_write[IO_MEM_NB_ENTRIES][4];
18033417e70Sbellard CPUReadMemoryFunc *io_mem_read[IO_MEM_NB_ENTRIES][4];
181a4193c8aSbellard void *io_mem_opaque[IO_MEM_NB_ENTRIES];
18288715657Saliguori char io_mem_used[IO_MEM_NB_ENTRIES];
1836658ffb8Spbrook static int io_mem_watch;
1846658ffb8Spbrook #endif
18533417e70Sbellard 
18634865134Sbellard /* log support */
187d9b630fdSblueswir1 static const char *logfilename = "/tmp/qemu.log";
18834865134Sbellard FILE *logfile;
18934865134Sbellard int loglevel;
190e735b91cSpbrook static int log_append = 0;
19134865134Sbellard 
192e3db7226Sbellard /* statistics */
193e3db7226Sbellard static int tlb_flush_count;
194e3db7226Sbellard static int tb_flush_count;
195e3db7226Sbellard static int tb_phys_invalidate_count;
196e3db7226Sbellard 
/* byte offset of 'addr' inside its target page */
#define SUBPAGE_IDX(addr) ((addr) & ~TARGET_PAGE_MASK)
/* A physical page whose bytes are routed to different I/O handlers.
   NOTE(review): the trailing [4] presumably mirrors the access-size
   slots of the io_mem_read/io_mem_write tables above, and the [2] a
   read/write split — confirm against the subpage accessors. */
typedef struct subpage_t {
    target_phys_addr_t base;                        /* guest-physical base of the page */
    CPUReadMemoryFunc **mem_read[TARGET_PAGE_SIZE][4];
    CPUWriteMemoryFunc **mem_write[TARGET_PAGE_SIZE][4];
    void *opaque[TARGET_PAGE_SIZE][2][4];
    ram_addr_t region_offset[TARGET_PAGE_SIZE][2][4];
} subpage_t;
205db7b5426Sblueswir1 
#ifdef _WIN32
/* Make an existing memory range executable (used for the translated
   code buffer and prologue).  Win32 variant: VirtualProtect accepts an
   arbitrary byte range, so no manual page rounding is needed.
   NOTE(review): the VirtualProtect return value is ignored; a failure
   would silently leave the buffer non-executable. */
static void map_exec(void *addr, long size)
{
    DWORD old_protect;
    VirtualProtect(addr, size,
                   PAGE_EXECUTE_READWRITE, &old_protect);

}
#else
/* POSIX variant: mprotect() works on whole pages, so widen the range
   outward to page boundaries first.
   NOTE(review): the mprotect return value is ignored. */
static void map_exec(void *addr, long size)
{
    unsigned long start, end, page_size;

    page_size = getpagesize();
    start = (unsigned long)addr;
    start &= ~(page_size - 1);          /* round start down to a page */

    end = (unsigned long)addr + size;
    end += page_size - 1;               /* round end up to a page */
    end &= ~(page_size - 1);

    mprotect((void *)start, end - start,
             PROT_READ | PROT_WRITE | PROT_EXEC);
}
#endif
2317cb69caeSbellard 
/* Initialize host/target page-size bookkeeping and the first level of
   the physical page table.  For non-Win32 usermode emulation, also mark
   every range the host process has already mapped as PAGE_RESERVED so
   guest mmap cannot collide with it. */
static void page_init(void)
{
    /* NOTE: we can always suppose that qemu_host_page_size >=
       TARGET_PAGE_SIZE */
#ifdef _WIN32
    {
        SYSTEM_INFO system_info;

        GetSystemInfo(&system_info);
        qemu_real_host_page_size = system_info.dwPageSize;
    }
#else
    qemu_real_host_page_size = getpagesize();
#endif
    /* qemu_host_page_size may already be set; clamp it so it is at
       least both the real host page size and the target page size */
    if (qemu_host_page_size == 0)
        qemu_host_page_size = qemu_real_host_page_size;
    if (qemu_host_page_size < TARGET_PAGE_SIZE)
        qemu_host_page_size = TARGET_PAGE_SIZE;
    /* derive log2 and mask from the (power of two) host page size */
    qemu_host_page_bits = 0;
    while ((1 << qemu_host_page_bits) < qemu_host_page_size)
        qemu_host_page_bits++;
    qemu_host_page_mask = ~(qemu_host_page_size - 1);
    /* first level of the physical page table, zero-filled */
    l1_phys_map = qemu_vmalloc(L1_SIZE * sizeof(void *));
    memset(l1_phys_map, 0, L1_SIZE * sizeof(void *));

#if !defined(_WIN32) && defined(CONFIG_USER_ONLY)
    {
        long long startaddr, endaddr;
        FILE *f;
        int n;

        mmap_lock();
        last_brk = (unsigned long)sbrk(0);
        /* best effort: if /proc/self/maps is unavailable the loop is
           simply skipped */
        f = fopen("/proc/self/maps", "r");
        if (f) {
            do {
                n = fscanf (f, "%llx-%llx %*[^\n]\n", &startaddr, &endaddr);
                if (n == 2) {
                    /* clamp to the addressable physical range */
                    startaddr = MIN(startaddr,
                                    (1ULL << TARGET_PHYS_ADDR_SPACE_BITS) - 1);
                    endaddr = MIN(endaddr,
                                    (1ULL << TARGET_PHYS_ADDR_SPACE_BITS) - 1);
                    page_set_flags(startaddr & TARGET_PAGE_MASK,
                                   TARGET_PAGE_ALIGN(endaddr),
                                   PAGE_RESERVED);
                }
            } while (!feof(f));
            fclose(f);
        }
        mmap_unlock();
    }
#endif
}
28554936004Sbellard 
286434929bfSaliguori static inline PageDesc **page_l1_map(target_ulong index)
28754936004Sbellard {
28817e2377aSpbrook #if TARGET_LONG_BITS > 32
28917e2377aSpbrook     /* Host memory outside guest VM.  For 32-bit targets we have already
29017e2377aSpbrook        excluded high addresses.  */
291d8173e0fSths     if (index > ((target_ulong)L2_SIZE * L1_SIZE))
29217e2377aSpbrook         return NULL;
29317e2377aSpbrook #endif
294434929bfSaliguori     return &l1_map[index >> L2_BITS];
295434929bfSaliguori }
296434929bfSaliguori 
297434929bfSaliguori static inline PageDesc *page_find_alloc(target_ulong index)
298434929bfSaliguori {
299434929bfSaliguori     PageDesc **lp, *p;
300434929bfSaliguori     lp = page_l1_map(index);
301434929bfSaliguori     if (!lp)
302434929bfSaliguori         return NULL;
303434929bfSaliguori 
30454936004Sbellard     p = *lp;
30554936004Sbellard     if (!p) {
30654936004Sbellard         /* allocate if not found */
30717e2377aSpbrook #if defined(CONFIG_USER_ONLY)
30817e2377aSpbrook         size_t len = sizeof(PageDesc) * L2_SIZE;
30917e2377aSpbrook         /* Don't use qemu_malloc because it may recurse.  */
31017e2377aSpbrook         p = mmap(0, len, PROT_READ | PROT_WRITE,
31117e2377aSpbrook                  MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
31254936004Sbellard         *lp = p;
313fb1c2cd7Saurel32         if (h2g_valid(p)) {
314fb1c2cd7Saurel32             unsigned long addr = h2g(p);
31517e2377aSpbrook             page_set_flags(addr & TARGET_PAGE_MASK,
31617e2377aSpbrook                            TARGET_PAGE_ALIGN(addr + len),
31717e2377aSpbrook                            PAGE_RESERVED);
31817e2377aSpbrook         }
31917e2377aSpbrook #else
32017e2377aSpbrook         p = qemu_mallocz(sizeof(PageDesc) * L2_SIZE);
32117e2377aSpbrook         *lp = p;
32217e2377aSpbrook #endif
32354936004Sbellard     }
32454936004Sbellard     return p + (index & (L2_SIZE - 1));
32554936004Sbellard }
32654936004Sbellard 
32700f82b8aSaurel32 static inline PageDesc *page_find(target_ulong index)
32854936004Sbellard {
329434929bfSaliguori     PageDesc **lp, *p;
330434929bfSaliguori     lp = page_l1_map(index);
331434929bfSaliguori     if (!lp)
332434929bfSaliguori         return NULL;
33354936004Sbellard 
334434929bfSaliguori     p = *lp;
33554936004Sbellard     if (!p)
33654936004Sbellard         return 0;
337fd6ce8f6Sbellard     return p + (index & (L2_SIZE - 1));
33854936004Sbellard }
33954936004Sbellard 
/* Return the PhysPageDesc for guest-physical page 'index'.  With
   alloc != 0 the missing intermediate and leaf tables are created on
   demand; with alloc == 0 this is a pure lookup that returns NULL when
   any level is absent. */
static PhysPageDesc *phys_page_find_alloc(target_phys_addr_t index, int alloc)
{
    void **lp, **p;
    PhysPageDesc *pd;

    p = (void **)l1_phys_map;
#if TARGET_PHYS_ADDR_SPACE_BITS > 32
    /* wide physical addresses need an extra level of indirection for
       the bits above 32 */
#if TARGET_PHYS_ADDR_SPACE_BITS > (32 + L1_BITS)
#error unsupported TARGET_PHYS_ADDR_SPACE_BITS
#endif
    lp = p + ((index >> (L1_BITS + L2_BITS)) & (L1_SIZE - 1));
    p = *lp;
    if (!p) {
        /* allocate if not found */
        if (!alloc)
            return NULL;
        p = qemu_vmalloc(sizeof(void *) * L1_SIZE);
        memset(p, 0, sizeof(void *) * L1_SIZE);
        *lp = p;
    }
#endif
    lp = p + ((index >> L2_BITS) & (L1_SIZE - 1));
    pd = *lp;
    if (!pd) {
        int i;
        /* allocate if not found */
        if (!alloc)
            return NULL;
        pd = qemu_vmalloc(sizeof(PhysPageDesc) * L2_SIZE);
        *lp = pd;
        /* every page of the new leaf starts out unassigned.
           NOTE(review): region_offset is left uninitialized here —
           confirm all readers set or ignore it for unassigned pages. */
        for (i = 0; i < L2_SIZE; i++)
          pd[i].phys_offset = IO_MEM_UNASSIGNED;
    }
    return ((PhysPageDesc *)pd) + (index & (L2_SIZE - 1));
}
37692e873b9Sbellard 
377108c49b8Sbellard static inline PhysPageDesc *phys_page_find(target_phys_addr_t index)
37892e873b9Sbellard {
379108c49b8Sbellard     return phys_page_find_alloc(index, 0);
38092e873b9Sbellard }
38192e873b9Sbellard 
3829fa3e853Sbellard #if !defined(CONFIG_USER_ONLY)
3836a00d601Sbellard static void tlb_protect_code(ram_addr_t ram_addr);
3843a7d929eSbellard static void tlb_unprotect_code_phys(CPUState *env, ram_addr_t ram_addr,
3853a7d929eSbellard                                     target_ulong vaddr);
386c8a706feSpbrook #define mmap_lock() do { } while(0)
387c8a706feSpbrook #define mmap_unlock() do { } while(0)
3889fa3e853Sbellard #endif
389fd6ce8f6Sbellard 
3904369415fSbellard #define DEFAULT_CODE_GEN_BUFFER_SIZE (32 * 1024 * 1024)
3914369415fSbellard 
3924369415fSbellard #if defined(CONFIG_USER_ONLY)
3934369415fSbellard /* Currently it is not recommanded to allocate big chunks of data in
3944369415fSbellard    user mode. It will change when a dedicated libc will be used */
3954369415fSbellard #define USE_STATIC_CODE_GEN_BUFFER
3964369415fSbellard #endif
3974369415fSbellard 
3984369415fSbellard #ifdef USE_STATIC_CODE_GEN_BUFFER
3994369415fSbellard static uint8_t static_code_gen_buffer[DEFAULT_CODE_GEN_BUFFER_SIZE];
4004369415fSbellard #endif
4014369415fSbellard 
/* Allocate the buffer that will receive translated code, make it
   executable, and size the TranslationBlock descriptor array to match.
   'tb_size' of 0 selects a default size. */
static void code_gen_alloc(unsigned long tb_size)
{
#ifdef USE_STATIC_CODE_GEN_BUFFER
    /* user mode: use the static BSS buffer and just flip its protection */
    code_gen_buffer = static_code_gen_buffer;
    code_gen_buffer_size = DEFAULT_CODE_GEN_BUFFER_SIZE;
    map_exec(code_gen_buffer, code_gen_buffer_size);
#else
    code_gen_buffer_size = tb_size;
    if (code_gen_buffer_size == 0) {
#if defined(CONFIG_USER_ONLY)
        /* in user mode, phys_ram_size is not meaningful */
        code_gen_buffer_size = DEFAULT_CODE_GEN_BUFFER_SIZE;
#else
        /* XXX: needs adjustments */
        code_gen_buffer_size = (unsigned long)(phys_ram_size / 4);
#endif
    }
    if (code_gen_buffer_size < MIN_CODE_GEN_BUFFER_SIZE)
        code_gen_buffer_size = MIN_CODE_GEN_BUFFER_SIZE;
    /* The code gen buffer location may have constraints depending on
       the host cpu and OS */
#if defined(__linux__)
    {
        int flags;
        void *start = NULL;

        flags = MAP_PRIVATE | MAP_ANONYMOUS;
#if defined(__x86_64__)
        /* keep the buffer in the low 2GB so 32-bit relative branches
           generated by TCG can reach it */
        flags |= MAP_32BIT;
        /* Cannot map more than that */
        if (code_gen_buffer_size > (800 * 1024 * 1024))
            code_gen_buffer_size = (800 * 1024 * 1024);
#elif defined(__sparc_v9__)
        // Map the buffer below 2G, so we can use direct calls and branches
        flags |= MAP_FIXED;
        start = (void *) 0x60000000UL;
        if (code_gen_buffer_size > (512 * 1024 * 1024))
            code_gen_buffer_size = (512 * 1024 * 1024);
#elif defined(__arm__)
        /* Map the buffer below 32M, so we can use direct calls and branches */
        flags |= MAP_FIXED;
        start = (void *) 0x01000000UL;
        if (code_gen_buffer_size > 16 * 1024 * 1024)
            code_gen_buffer_size = 16 * 1024 * 1024;
#endif
        code_gen_buffer = mmap(start, code_gen_buffer_size,
                               PROT_WRITE | PROT_READ | PROT_EXEC,
                               flags, -1, 0);
        if (code_gen_buffer == MAP_FAILED) {
            fprintf(stderr, "Could not allocate dynamic translator buffer\n");
            exit(1);
        }
    }
#elif defined(__FreeBSD__)
    {
        int flags;
        void *addr = NULL;
        flags = MAP_PRIVATE | MAP_ANONYMOUS;
#if defined(__x86_64__)
        /* FreeBSD doesn't have MAP_32BIT, use MAP_FIXED and assume
         * 0x40000000 is free */
        flags |= MAP_FIXED;
        addr = (void *)0x40000000;
        /* Cannot map more than that */
        if (code_gen_buffer_size > (800 * 1024 * 1024))
            code_gen_buffer_size = (800 * 1024 * 1024);
#endif
        code_gen_buffer = mmap(addr, code_gen_buffer_size,
                               PROT_WRITE | PROT_READ | PROT_EXEC,
                               flags, -1, 0);
        if (code_gen_buffer == MAP_FAILED) {
            fprintf(stderr, "Could not allocate dynamic translator buffer\n");
            exit(1);
        }
    }
#else
    /* other hosts: plain allocation followed by an mprotect */
    code_gen_buffer = qemu_malloc(code_gen_buffer_size);
    map_exec(code_gen_buffer, code_gen_buffer_size);
#endif
#endif /* !USE_STATIC_CODE_GEN_BUFFER */
    map_exec(code_gen_prologue, sizeof(code_gen_prologue));
    /* leave headroom for one maximally sized TB at the end so a block
       started near the limit cannot overflow the buffer */
    code_gen_buffer_max_size = code_gen_buffer_size -
        code_gen_max_block_size();
    code_gen_max_blocks = code_gen_buffer_size / CODE_GEN_AVG_BLOCK_SIZE;
    tbs = qemu_malloc(code_gen_max_blocks * sizeof(TranslationBlock));
}
48826a5f13bSbellard 
/* Must be called before using the QEMU cpus. 'tb_size' is the size
   (in bytes) allocated to the translation buffer. Zero means default
   size.  The call order below matters: the code buffer must exist
   before code_gen_ptr can point at it. */
void cpu_exec_init_all(unsigned long tb_size)
{
    cpu_gen_init();
    code_gen_alloc(tb_size);        /* translated-code buffer + TB array */
    code_gen_ptr = code_gen_buffer; /* start generating at the base */
    page_init();                    /* host/target page bookkeeping */
#if !defined(CONFIG_USER_ONLY)
    io_mem_init();                  /* I/O memory handler tables */
#endif
}
50226a5f13bSbellard 
5039656f324Spbrook #if defined(CPU_SAVE_VERSION) && !defined(CONFIG_USER_ONLY)
5049656f324Spbrook 
5059656f324Spbrook #define CPU_COMMON_SAVE_VERSION 1
5069656f324Spbrook 
5079656f324Spbrook static void cpu_common_save(QEMUFile *f, void *opaque)
5089656f324Spbrook {
5099656f324Spbrook     CPUState *env = opaque;
5109656f324Spbrook 
5119656f324Spbrook     qemu_put_be32s(f, &env->halted);
5129656f324Spbrook     qemu_put_be32s(f, &env->interrupt_request);
5139656f324Spbrook }
5149656f324Spbrook 
5159656f324Spbrook static int cpu_common_load(QEMUFile *f, void *opaque, int version_id)
5169656f324Spbrook {
5179656f324Spbrook     CPUState *env = opaque;
5189656f324Spbrook 
5199656f324Spbrook     if (version_id != CPU_COMMON_SAVE_VERSION)
5209656f324Spbrook         return -EINVAL;
5219656f324Spbrook 
5229656f324Spbrook     qemu_get_be32s(f, &env->halted);
52375f482aeSpbrook     qemu_get_be32s(f, &env->interrupt_request);
5249656f324Spbrook     tlb_flush(env, 1);
5259656f324Spbrook 
5269656f324Spbrook     return 0;
5279656f324Spbrook }
5289656f324Spbrook #endif
5299656f324Spbrook 
5306a00d601Sbellard void cpu_exec_init(CPUState *env)
531fd6ce8f6Sbellard {
5326a00d601Sbellard     CPUState **penv;
5336a00d601Sbellard     int cpu_index;
5346a00d601Sbellard 
5356a00d601Sbellard     env->next_cpu = NULL;
5366a00d601Sbellard     penv = &first_cpu;
5376a00d601Sbellard     cpu_index = 0;
5386a00d601Sbellard     while (*penv != NULL) {
5396a00d601Sbellard         penv = (CPUState **)&(*penv)->next_cpu;
5406a00d601Sbellard         cpu_index++;
5416a00d601Sbellard     }
5426a00d601Sbellard     env->cpu_index = cpu_index;
543c0ce998eSaliguori     TAILQ_INIT(&env->breakpoints);
544c0ce998eSaliguori     TAILQ_INIT(&env->watchpoints);
5456a00d601Sbellard     *penv = env;
546b3c7724cSpbrook #if defined(CPU_SAVE_VERSION) && !defined(CONFIG_USER_ONLY)
5479656f324Spbrook     register_savevm("cpu_common", cpu_index, CPU_COMMON_SAVE_VERSION,
5489656f324Spbrook                     cpu_common_save, cpu_common_load, env);
549b3c7724cSpbrook     register_savevm("cpu", cpu_index, CPU_SAVE_VERSION,
550b3c7724cSpbrook                     cpu_save, cpu_load, env);
551b3c7724cSpbrook #endif
552fd6ce8f6Sbellard }
553fd6ce8f6Sbellard 
5549fa3e853Sbellard static inline void invalidate_page_bitmap(PageDesc *p)
5559fa3e853Sbellard {
5569fa3e853Sbellard     if (p->code_bitmap) {
55759817ccbSbellard         qemu_free(p->code_bitmap);
5589fa3e853Sbellard         p->code_bitmap = NULL;
5599fa3e853Sbellard     }
5609fa3e853Sbellard     p->code_write_count = 0;
5619fa3e853Sbellard }
5629fa3e853Sbellard 
563fd6ce8f6Sbellard /* set to NULL all the 'first_tb' fields in all PageDescs */
564fd6ce8f6Sbellard static void page_flush_tb(void)
565fd6ce8f6Sbellard {
566fd6ce8f6Sbellard     int i, j;
567fd6ce8f6Sbellard     PageDesc *p;
568fd6ce8f6Sbellard 
569fd6ce8f6Sbellard     for(i = 0; i < L1_SIZE; i++) {
570fd6ce8f6Sbellard         p = l1_map[i];
571fd6ce8f6Sbellard         if (p) {
5729fa3e853Sbellard             for(j = 0; j < L2_SIZE; j++) {
5739fa3e853Sbellard                 p->first_tb = NULL;
5749fa3e853Sbellard                 invalidate_page_bitmap(p);
5759fa3e853Sbellard                 p++;
5769fa3e853Sbellard             }
577fd6ce8f6Sbellard         }
578fd6ce8f6Sbellard     }
579fd6ce8f6Sbellard }
580fd6ce8f6Sbellard 
/* flush all the translation blocks */
/* XXX: tb_flush is currently not thread safe */
void tb_flush(CPUState *env1)
{
    CPUState *env;
#if defined(DEBUG_FLUSH)
    printf("qemu: flush code_size=%ld nb_tbs=%d avg_tb_size=%ld\n",
           (unsigned long)(code_gen_ptr - code_gen_buffer),
           nb_tbs, nb_tbs > 0 ?
           ((unsigned long)(code_gen_ptr - code_gen_buffer)) / nb_tbs : 0);
#endif
    /* sanity check: generated code must not have run past the buffer */
    if ((unsigned long)(code_gen_ptr - code_gen_buffer) > code_gen_buffer_size)
        cpu_abort(env1, "Internal error: code buffer overflow\n");

    nb_tbs = 0;

    /* every CPU's jump cache holds pointers into the TB array being
       discarded, so wipe them all */
    for(env = first_cpu; env != NULL; env = env->next_cpu) {
        memset (env->tb_jmp_cache, 0, TB_JMP_CACHE_SIZE * sizeof (void *));
    }

    memset (tb_phys_hash, 0, CODE_GEN_PHYS_HASH_SIZE * sizeof (void *));
    page_flush_tb();

    /* reuse the whole code buffer from the start */
    code_gen_ptr = code_gen_buffer;
    /* XXX: flush processor icache at this point if cache flush is
       expensive */
    tb_flush_count++;
}
609fd6ce8f6Sbellard 
610fd6ce8f6Sbellard #ifdef DEBUG_TB_CHECK
611fd6ce8f6Sbellard 
612bc98a7efSj_mayer static void tb_invalidate_check(target_ulong address)
613fd6ce8f6Sbellard {
614fd6ce8f6Sbellard     TranslationBlock *tb;
615fd6ce8f6Sbellard     int i;
616fd6ce8f6Sbellard     address &= TARGET_PAGE_MASK;
61799773bd4Spbrook     for(i = 0;i < CODE_GEN_PHYS_HASH_SIZE; i++) {
61899773bd4Spbrook         for(tb = tb_phys_hash[i]; tb != NULL; tb = tb->phys_hash_next) {
619fd6ce8f6Sbellard             if (!(address + TARGET_PAGE_SIZE <= tb->pc ||
620fd6ce8f6Sbellard                   address >= tb->pc + tb->size)) {
621fd6ce8f6Sbellard                 printf("ERROR invalidate: address=%08lx PC=%08lx size=%04x\n",
62299773bd4Spbrook                        address, (long)tb->pc, tb->size);
623fd6ce8f6Sbellard             }
624fd6ce8f6Sbellard         }
625fd6ce8f6Sbellard     }
626fd6ce8f6Sbellard }
627fd6ce8f6Sbellard 
/* verify that all the pages have correct rights for code */
/* Debug-only: every page that contains translated code must have been
   write-protected, so self-modifying code traps.  Check the page of
   the first and of the last byte of each TB. */
static void tb_page_check(void)
{
    TranslationBlock *tb;
    int i, flags1, flags2;

    for(i = 0;i < CODE_GEN_PHYS_HASH_SIZE; i++) {
        for(tb = tb_phys_hash[i]; tb != NULL; tb = tb->phys_hash_next) {
            flags1 = page_get_flags(tb->pc);
            flags2 = page_get_flags(tb->pc + tb->size - 1);
            /* a writable code page means protection was lost */
            if ((flags1 & PAGE_WRITE) || (flags2 & PAGE_WRITE)) {
                printf("ERROR page flags: PC=%08lx size=%04x f1=%x f2=%x\n",
                       (long)tb->pc, tb->size, flags1, flags2);
            }
        }
    }
}
645fd6ce8f6Sbellard 
/* Debug-only: walk the circular list of TBs that jump into 'tb'.
   List pointers carry a tag in their low 2 bits: 0/1 select which
   jmp_next[] slot of the pointed-to TB continues the chain, 2 marks
   the final link, which must point back at 'tb' itself. */
static void tb_jmp_check(TranslationBlock *tb)
{
    TranslationBlock *tb1;
    unsigned int n1;

    /* suppress any remaining jumps to this TB */
    tb1 = tb->jmp_first;
    for(;;) {
        n1 = (long)tb1 & 3;                            /* tag bits */
        tb1 = (TranslationBlock *)((long)tb1 & ~3);    /* real pointer */
        if (n1 == 2)
            break;
        tb1 = tb1->jmp_next[n1];
    }
    /* check end of list */
    if (tb1 != tb) {
        printf("ERROR: jmp_list from 0x%08lx\n", (long)tb);
    }
}
665d4e8164fSbellard 
666fd6ce8f6Sbellard #endif
667fd6ce8f6Sbellard 
668fd6ce8f6Sbellard /* invalidate one TB */
669fd6ce8f6Sbellard static inline void tb_remove(TranslationBlock **ptb, TranslationBlock *tb,
670fd6ce8f6Sbellard                              int next_offset)
671fd6ce8f6Sbellard {
672fd6ce8f6Sbellard     TranslationBlock *tb1;
673fd6ce8f6Sbellard     for(;;) {
674fd6ce8f6Sbellard         tb1 = *ptb;
675fd6ce8f6Sbellard         if (tb1 == tb) {
676fd6ce8f6Sbellard             *ptb = *(TranslationBlock **)((char *)tb1 + next_offset);
677fd6ce8f6Sbellard             break;
678fd6ce8f6Sbellard         }
679fd6ce8f6Sbellard         ptb = (TranslationBlock **)((char *)tb1 + next_offset);
680fd6ce8f6Sbellard     }
681fd6ce8f6Sbellard }
682fd6ce8f6Sbellard 
/* Unlink 'tb' from a per-page TB list.  Each pointer in the list is
   tagged in its low 2 bits with the index (0 or 1) of the page slot
   through which the pointed-to TB is chained, so the tag must be
   stripped before dereferencing and reused to pick the next link. */
static inline void tb_page_remove(TranslationBlock **ptb, TranslationBlock *tb)
{
    TranslationBlock *tb1;
    unsigned int n1;

    for(;;) {
        tb1 = *ptb;
        n1 = (long)tb1 & 3;                            /* page-slot tag */
        tb1 = (TranslationBlock *)((long)tb1 & ~3);    /* real pointer */
        if (tb1 == tb) {
            *ptb = tb1->page_next[n1];
            break;
        }
        ptb = &tb1->page_next[n1];
    }
}
6999fa3e853Sbellard 
/* Remove the entry for jump slot 'n' of 'tb' from the circular
   "incoming jumps" list it is linked on.  Pointers in that list are
   tagged in their low 2 bits (0/1 = jmp_next slot, 2 = list head in
   jmp_first).  No-op if the slot is not chained. */
static inline void tb_jmp_remove(TranslationBlock *tb, int n)
{
    TranslationBlock *tb1, **ptb;
    unsigned int n1;

    ptb = &tb->jmp_next[n];
    tb1 = *ptb;
    if (tb1) {
        /* find tb(n) in circular list */
        for(;;) {
            tb1 = *ptb;
            n1 = (long)tb1 & 3;                            /* tag bits */
            tb1 = (TranslationBlock *)((long)tb1 & ~3);
            if (n1 == n && tb1 == tb)
                break;
            if (n1 == 2) {
                /* tag 2: continue through the list head */
                ptb = &tb1->jmp_first;
            } else {
                ptb = &tb1->jmp_next[n1];
            }
        }
        /* now we can suppress tb(n) from the list */
        *ptb = tb->jmp_next[n];

        tb->jmp_next[n] = NULL;
    }
}
727d4e8164fSbellard 
728d4e8164fSbellard /* reset the jump entry 'n' of a TB so that it is not chained to
729d4e8164fSbellard    another TB */
730d4e8164fSbellard static inline void tb_reset_jump(TranslationBlock *tb, int n)
731d4e8164fSbellard {
732d4e8164fSbellard     tb_set_jmp_target(tb, n, (unsigned long)(tb->tc_ptr + tb->tb_next_offset[n]));
733d4e8164fSbellard }
734d4e8164fSbellard 
/* Invalidate one TB: remove it from every structure that can reach it
   (physical hash table, per-page TB lists, each CPU's jump cache, and
   the jump chaining lists), and unchain any TBs that jump into it.
   'page_addr' is the page currently being invalidated by the caller;
   that page's list is skipped here because the caller handles it. */
void tb_phys_invalidate(TranslationBlock *tb, target_ulong page_addr)
{
    CPUState *env;
    PageDesc *p;
    unsigned int h, n1;
    target_phys_addr_t phys_pc;
    TranslationBlock *tb1, *tb2;

    /* remove the TB from the hash list */
    phys_pc = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
    h = tb_phys_hash_func(phys_pc);
    tb_remove(&tb_phys_hash[h], tb,
              offsetof(TranslationBlock, phys_hash_next));

    /* remove the TB from the page list */
    if (tb->page_addr[0] != page_addr) {
        p = page_find(tb->page_addr[0] >> TARGET_PAGE_BITS);
        tb_page_remove(&p->first_tb, tb);
        invalidate_page_bitmap(p);
    }
    /* page_addr[1] == -1 means the TB fits in a single page */
    if (tb->page_addr[1] != -1 && tb->page_addr[1] != page_addr) {
        p = page_find(tb->page_addr[1] >> TARGET_PAGE_BITS);
        tb_page_remove(&p->first_tb, tb);
        invalidate_page_bitmap(p);
    }

    tb_invalidated_flag = 1;

    /* remove the TB from the hash list */
    h = tb_jmp_cache_hash_func(tb->pc);
    for(env = first_cpu; env != NULL; env = env->next_cpu) {
        if (env->tb_jmp_cache[h] == tb)
            env->tb_jmp_cache[h] = NULL;
    }

    /* suppress this TB from the two jump lists */
    tb_jmp_remove(tb, 0);
    tb_jmp_remove(tb, 1);

    /* suppress any remaining jumps to this TB */
    tb1 = tb->jmp_first;
    for(;;) {
        n1 = (long)tb1 & 3;            /* tag: 0/1 = slot, 2 = end */
        if (n1 == 2)
            break;
        tb1 = (TranslationBlock *)((long)tb1 & ~3);
        tb2 = tb1->jmp_next[n1];
        tb_reset_jump(tb1, n1);        /* repatch jump to tb1's epilogue */
        tb1->jmp_next[n1] = NULL;
        tb1 = tb2;
    }
    tb->jmp_first = (TranslationBlock *)((long)tb | 2); /* fail safe */

    tb_phys_invalidate_count++;
}
7909fa3e853Sbellard 
/* Set bits [start, start + len) in the bitmap 'tab', LSB-first within
   each byte.  Partial bytes at either end of the range are handled
   with masks; full bytes in the middle are filled in one memset. */
static inline void set_bits(uint8_t *tab, int start, int len)
{
    int end = start + len;
    uint8_t *p = tab + (start >> 3);
    int head_mask = 0xff << (start & 7);

    if ((start & ~7) == (end & ~7)) {
        /* the whole range lies within a single byte */
        if (start < end) {
            *p |= head_mask & ~(0xff << (end & 7));
        }
    } else {
        int nfull;

        *p++ |= head_mask;                   /* partial leading byte */
        start = (start + 8) & ~7;            /* round up to byte edge */
        nfull = ((end & ~7) - start) >> 3;   /* whole bytes remaining */
        if (nfull > 0) {
            memset(p, 0xff, nfull);
            p += nfull;
            start += nfull << 3;
        }
        if (start < end) {
            *p |= ~(0xff << (end & 7));      /* partial trailing byte */
        }
    }
}
8179fa3e853Sbellard 
/* Build the per-page code bitmap: one bit per byte of the page, set
   where translated code exists.  Used by the fast write-invalidate
   path to skip writes that do not touch any code. */
static void build_page_bitmap(PageDesc *p)
{
    int n, tb_start, tb_end;
    TranslationBlock *tb;

    /* TARGET_PAGE_SIZE bits, zero-initialized */
    p->code_bitmap = qemu_mallocz(TARGET_PAGE_SIZE / 8);

    tb = p->first_tb;
    while (tb != NULL) {
        n = (long)tb & 3;                          /* page-slot tag */
        tb = (TranslationBlock *)((long)tb & ~3);
        /* NOTE: this is subtle as a TB may span two physical pages */
        if (n == 0) {
            /* NOTE: tb_end may be after the end of the page, but
               it is not a problem */
            tb_start = tb->pc & ~TARGET_PAGE_MASK;
            tb_end = tb_start + tb->size;
            if (tb_end > TARGET_PAGE_SIZE)
                tb_end = TARGET_PAGE_SIZE;
        } else {
            /* second page of the TB: it starts at offset 0 */
            tb_start = 0;
            tb_end = ((tb->pc + tb->size) & ~TARGET_PAGE_MASK);
        }
        set_bits(p->code_bitmap, tb_start, tb_end - tb_start);
        tb = tb->page_next[n];
    }
}
8459fa3e853Sbellard 
/* Translate a new TB starting at guest (pc, cs_base, flags) and link
   it into the physical page tables.  If TB allocation fails, all TBs
   are flushed and the allocation is retried (it cannot fail twice).
   Returns the new TB. */
TranslationBlock *tb_gen_code(CPUState *env,
                              target_ulong pc, target_ulong cs_base,
                              int flags, int cflags)
{
    TranslationBlock *tb;
    uint8_t *tc_ptr;
    target_ulong phys_pc, phys_page2, virt_page2;
    int code_gen_size;

    phys_pc = get_phys_addr_code(env, pc);
    tb = tb_alloc(pc);
    if (!tb) {
        /* flush must be done */
        tb_flush(env);
        /* cannot fail at this point */
        tb = tb_alloc(pc);
        /* Don't forget to invalidate previous TB info.  */
        tb_invalidated_flag = 1;
    }
    tc_ptr = code_gen_ptr;
    tb->tc_ptr = tc_ptr;
    tb->cs_base = cs_base;
    tb->flags = flags;
    tb->cflags = cflags;
    cpu_gen_code(env, tb, &code_gen_size);
    /* bump the code buffer pointer, rounded up to CODE_GEN_ALIGN */
    code_gen_ptr = (void *)(((unsigned long)code_gen_ptr + code_gen_size + CODE_GEN_ALIGN - 1) & ~(CODE_GEN_ALIGN - 1));

    /* check next page if needed */
    virt_page2 = (pc + tb->size - 1) & TARGET_PAGE_MASK;
    phys_page2 = -1;
    if ((pc & TARGET_PAGE_MASK) != virt_page2) {
        /* TB straddles a page boundary: resolve the second page too */
        phys_page2 = get_phys_addr_code(env, virt_page2);
    }
    tb_link_phys(tb, phys_pc, phys_page2);
    return tb;
}
882d720b93dSbellard 
/* invalidate all TBs which intersect with the target physical page
   starting in range [start;end[. NOTE: start and end must refer to
   the same physical page. 'is_cpu_write_access' should be true if called
   from a real cpu write access: the virtual CPU will exit the current
   TB if code is modified inside this TB. */
void tb_invalidate_phys_page_range(target_phys_addr_t start, target_phys_addr_t end,
                                   int is_cpu_write_access)
{
    TranslationBlock *tb, *tb_next, *saved_tb;
    CPUState *env = cpu_single_env;
    target_ulong tb_start, tb_end;
    PageDesc *p;
    int n;
#ifdef TARGET_HAS_PRECISE_SMC
    int current_tb_not_found = is_cpu_write_access;
    TranslationBlock *current_tb = NULL;
    int current_tb_modified = 0;
    target_ulong current_pc = 0;
    target_ulong current_cs_base = 0;
    int current_flags = 0;
#endif /* TARGET_HAS_PRECISE_SMC */

    p = page_find(start >> TARGET_PAGE_BITS);
    if (!p)
        return;
    /* after enough write faults on this page, pay the cost of a code
       bitmap so future small writes can take the fast path */
    if (!p->code_bitmap &&
        ++p->code_write_count >= SMC_BITMAP_USE_THRESHOLD &&
        is_cpu_write_access) {
        /* build code bitmap */
        build_page_bitmap(p);
    }

    /* we remove all the TBs in the range [start, end[ */
    /* XXX: see if in some cases it could be faster to invalidate all the code */
    tb = p->first_tb;
    while (tb != NULL) {
        n = (long)tb & 3;                         /* page-slot tag */
        tb = (TranslationBlock *)((long)tb & ~3);
        tb_next = tb->page_next[n];
        /* NOTE: this is subtle as a TB may span two physical pages */
        if (n == 0) {
            /* NOTE: tb_end may be after the end of the page, but
               it is not a problem */
            tb_start = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
            tb_end = tb_start + tb->size;
        } else {
            tb_start = tb->page_addr[1];
            tb_end = tb_start + ((tb->pc + tb->size) & ~TARGET_PAGE_MASK);
        }
        if (!(tb_end <= start || tb_start >= end)) {
#ifdef TARGET_HAS_PRECISE_SMC
            if (current_tb_not_found) {
                current_tb_not_found = 0;
                current_tb = NULL;
                if (env->mem_io_pc) {
                    /* now we have a real cpu fault */
                    current_tb = tb_find_pc(env->mem_io_pc);
                }
            }
            if (current_tb == tb &&
                (current_tb->cflags & CF_COUNT_MASK) != 1) {
                /* If we are modifying the current TB, we must stop
                its execution. We could be more precise by checking
                that the modification is after the current PC, but it
                would require a specialized function to partially
                restore the CPU state */

                current_tb_modified = 1;
                cpu_restore_state(current_tb, env,
                                  env->mem_io_pc, NULL);
                cpu_get_tb_cpu_state(env, &current_pc, &current_cs_base,
                                     &current_flags);
            }
#endif /* TARGET_HAS_PRECISE_SMC */
            /* we need to do that to handle the case where a signal
               occurs while doing tb_phys_invalidate() */
            saved_tb = NULL;
            if (env) {
                saved_tb = env->current_tb;
                env->current_tb = NULL;
            }
            tb_phys_invalidate(tb, -1);
            if (env) {
                env->current_tb = saved_tb;
                /* replay any interrupt that arrived while current_tb
                   was cleared */
                if (env->interrupt_request && env->current_tb)
                    cpu_interrupt(env, env->interrupt_request);
            }
        }
        tb = tb_next;
    }
#if !defined(CONFIG_USER_ONLY)
    /* if no code remaining, no need to continue to use slow writes */
    if (!p->first_tb) {
        invalidate_page_bitmap(p);
        if (is_cpu_write_access) {
            tlb_unprotect_code_phys(env, start, env->mem_io_vaddr);
        }
    }
#endif
#ifdef TARGET_HAS_PRECISE_SMC
    if (current_tb_modified) {
        /* we generate a block containing just the instruction
           modifying the memory. It will ensure that it cannot modify
           itself */
        env->current_tb = NULL;
        tb_gen_code(env, current_pc, current_cs_base, current_flags, 1);
        cpu_resume_from_signal(env, NULL);
    }
#endif
}
9939fa3e853Sbellard 
/* len must be <= 8 and start must be a multiple of len */
/* Fast invalidate path for small aligned writes: if the page has a
   code bitmap and none of the written bytes hold translated code,
   the write needs no invalidation at all. */
static inline void tb_invalidate_phys_page_fast(target_phys_addr_t start, int len)
{
    PageDesc *p;
    int offset, b;
#if 0
    if (1) {
        qemu_log("modifying code at 0x%x size=%d EIP=%x PC=%08x\n",
                  cpu_single_env->mem_io_vaddr, len,
                  cpu_single_env->eip,
                  cpu_single_env->eip + (long)cpu_single_env->segs[R_CS].base);
    }
#endif
    p = page_find(start >> TARGET_PAGE_BITS);
    if (!p)
        return;
    if (p->code_bitmap) {
        offset = start & ~TARGET_PAGE_MASK;
        /* extract the 'len' bits covering the written bytes */
        b = p->code_bitmap[offset >> 3] >> (offset & 7);
        if (b & ((1 << len) - 1))
            goto do_invalidate;
    } else {
    do_invalidate:
        tb_invalidate_phys_page_range(start, start + len, 1);
    }
}
10209fa3e853Sbellard 
10219fa3e853Sbellard #if !defined(CONFIG_SOFTMMU)
/* Invalidate every TB on the page containing 'addr' (user-mode only
   path).  'pc'/'puc' describe the faulting write when called from a
   signal handler; pc == 0 means no precise fault context. */
static void tb_invalidate_phys_page(target_phys_addr_t addr,
                                    unsigned long pc, void *puc)
{
    TranslationBlock *tb;
    PageDesc *p;
    int n;
#ifdef TARGET_HAS_PRECISE_SMC
    TranslationBlock *current_tb = NULL;
    CPUState *env = cpu_single_env;
    int current_tb_modified = 0;
    target_ulong current_pc = 0;
    target_ulong current_cs_base = 0;
    int current_flags = 0;
#endif

    addr &= TARGET_PAGE_MASK;
    p = page_find(addr >> TARGET_PAGE_BITS);
    if (!p)
        return;
    tb = p->first_tb;
#ifdef TARGET_HAS_PRECISE_SMC
    if (tb && pc != 0) {
        current_tb = tb_find_pc(pc);
    }
#endif
    while (tb != NULL) {
        n = (long)tb & 3;                         /* page-slot tag */
        tb = (TranslationBlock *)((long)tb & ~3);
#ifdef TARGET_HAS_PRECISE_SMC
        if (current_tb == tb &&
            (current_tb->cflags & CF_COUNT_MASK) != 1) {
                /* If we are modifying the current TB, we must stop
                   its execution. We could be more precise by checking
                   that the modification is after the current PC, but it
                   would require a specialized function to partially
                   restore the CPU state */

            current_tb_modified = 1;
            cpu_restore_state(current_tb, env, pc, puc);
            cpu_get_tb_cpu_state(env, &current_pc, &current_cs_base,
                                 &current_flags);
        }
#endif /* TARGET_HAS_PRECISE_SMC */
        tb_phys_invalidate(tb, addr);
        tb = tb->page_next[n];
    }
    p->first_tb = NULL;
#ifdef TARGET_HAS_PRECISE_SMC
    if (current_tb_modified) {
        /* we generate a block containing just the instruction
           modifying the memory. It will ensure that it cannot modify
           itself */
        env->current_tb = NULL;
        tb_gen_code(env, current_pc, current_cs_base, current_flags, 1);
        cpu_resume_from_signal(env, puc);
    }
#endif
}
10809fa3e853Sbellard #endif
1081fd6ce8f6Sbellard 
/* add the tb in the target page and protect it if necessary */
/* 'n' (0 or 1) selects which of the TB's up-to-two pages is being
   linked; it is also stored as a tag in the low bits of the page's
   first_tb pointer. */
static inline void tb_alloc_page(TranslationBlock *tb,
                                 unsigned int n, target_ulong page_addr)
{
    PageDesc *p;
    TranslationBlock *last_first_tb;

    tb->page_addr[n] = page_addr;
    p = page_find_alloc(page_addr >> TARGET_PAGE_BITS);
    /* push onto the page's TB list, tagging the link with 'n' */
    tb->page_next[n] = p->first_tb;
    last_first_tb = p->first_tb;
    p->first_tb = (TranslationBlock *)((long)tb | n);
    invalidate_page_bitmap(p);

#if defined(TARGET_HAS_SMC) || 1

#if defined(CONFIG_USER_ONLY)
    if (p->flags & PAGE_WRITE) {
        target_ulong addr;
        PageDesc *p2;
        int prot;

        /* force the host page as non writable (writes will have a
           page fault + mprotect overhead) */
        page_addr &= qemu_host_page_mask;
        prot = 0;
        /* a host page may cover several target pages: gather the
           combined protection and clear PAGE_WRITE on each */
        for(addr = page_addr; addr < page_addr + qemu_host_page_size;
            addr += TARGET_PAGE_SIZE) {

            p2 = page_find (addr >> TARGET_PAGE_BITS);
            if (!p2)
                continue;
            prot |= p2->flags;
            p2->flags &= ~PAGE_WRITE;
            page_get_flags(addr);
          }
        mprotect(g2h(page_addr), qemu_host_page_size,
                 (prot & PAGE_BITS) & ~PAGE_WRITE);
#ifdef DEBUG_TB_INVALIDATE
        printf("protecting code page: 0x" TARGET_FMT_lx "\n",
               page_addr);
#endif
    }
#else
    /* if some code is already present, then the pages are already
       protected. So we handle the case where only the first TB is
       allocated in a physical page */
    if (!last_first_tb) {
        tlb_protect_code(page_addr);
    }
#endif

#endif /* TARGET_HAS_SMC */
}
1136fd6ce8f6Sbellard 
1137fd6ce8f6Sbellard /* Allocate a new translation block. Flush the translation buffer if
1138fd6ce8f6Sbellard    too many translation blocks or too much generated code. */
1139c27004ecSbellard TranslationBlock *tb_alloc(target_ulong pc)
1140fd6ce8f6Sbellard {
1141fd6ce8f6Sbellard     TranslationBlock *tb;
1142fd6ce8f6Sbellard 
114326a5f13bSbellard     if (nb_tbs >= code_gen_max_blocks ||
114426a5f13bSbellard         (code_gen_ptr - code_gen_buffer) >= code_gen_buffer_max_size)
1145d4e8164fSbellard         return NULL;
1146fd6ce8f6Sbellard     tb = &tbs[nb_tbs++];
1147fd6ce8f6Sbellard     tb->pc = pc;
1148b448f2f3Sbellard     tb->cflags = 0;
1149d4e8164fSbellard     return tb;
1150d4e8164fSbellard }
1151d4e8164fSbellard 
11522e70f6efSpbrook void tb_free(TranslationBlock *tb)
11532e70f6efSpbrook {
1154bf20dc07Sths     /* In practice this is mostly used for single use temporary TB
11552e70f6efSpbrook        Ignore the hard cases and just back up if this TB happens to
11562e70f6efSpbrook        be the last one generated.  */
11572e70f6efSpbrook     if (nb_tbs > 0 && tb == &tbs[nb_tbs - 1]) {
11582e70f6efSpbrook         code_gen_ptr = tb->tc_ptr;
11592e70f6efSpbrook         nb_tbs--;
11602e70f6efSpbrook     }
11612e70f6efSpbrook }
11622e70f6efSpbrook 
/* add a new TB and link it to the physical page tables. phys_page2 is
   (-1) to indicate that only one page contains the TB. */
void tb_link_phys(TranslationBlock *tb,
                  target_ulong phys_pc, target_ulong phys_page2)
{
    unsigned int h;
    TranslationBlock **ptb;

    /* Grab the mmap lock to stop another thread invalidating this TB
       before we are done.  */
    mmap_lock();
    /* add in the physical hash table */
    h = tb_phys_hash_func(phys_pc);
    ptb = &tb_phys_hash[h];
    tb->phys_hash_next = *ptb;
    *ptb = tb;

    /* add in the page list */
    tb_alloc_page(tb, 0, phys_pc & TARGET_PAGE_MASK);
    if (phys_page2 != -1)
        tb_alloc_page(tb, 1, phys_page2);
    else
        tb->page_addr[1] = -1;

    /* tag 2 marks the end of the incoming-jump list */
    tb->jmp_first = (TranslationBlock *)((long)tb | 2);
    tb->jmp_next[0] = NULL;
    tb->jmp_next[1] = NULL;

    /* init original jump addresses */
    /* 0xffff means the slot has no patchable jump */
    if (tb->tb_next_offset[0] != 0xffff)
        tb_reset_jump(tb, 0);
    if (tb->tb_next_offset[1] != 0xffff)
        tb_reset_jump(tb, 1);

#ifdef DEBUG_TB_CHECK
    tb_page_check();
#endif
    mmap_unlock();
}
1202fd6ce8f6Sbellard 
1203a513fe19Sbellard /* find the TB 'tb' such that tb[0].tc_ptr <= tc_ptr <
1204a513fe19Sbellard    tb[1].tc_ptr. Return NULL if not found */
1205a513fe19Sbellard TranslationBlock *tb_find_pc(unsigned long tc_ptr)
1206a513fe19Sbellard {
1207a513fe19Sbellard     int m_min, m_max, m;
1208a513fe19Sbellard     unsigned long v;
1209a513fe19Sbellard     TranslationBlock *tb;
1210a513fe19Sbellard 
1211a513fe19Sbellard     if (nb_tbs <= 0)
1212a513fe19Sbellard         return NULL;
1213a513fe19Sbellard     if (tc_ptr < (unsigned long)code_gen_buffer ||
1214a513fe19Sbellard         tc_ptr >= (unsigned long)code_gen_ptr)
1215a513fe19Sbellard         return NULL;
1216a513fe19Sbellard     /* binary search (cf Knuth) */
1217a513fe19Sbellard     m_min = 0;
1218a513fe19Sbellard     m_max = nb_tbs - 1;
1219a513fe19Sbellard     while (m_min <= m_max) {
1220a513fe19Sbellard         m = (m_min + m_max) >> 1;
1221a513fe19Sbellard         tb = &tbs[m];
1222a513fe19Sbellard         v = (unsigned long)tb->tc_ptr;
1223a513fe19Sbellard         if (v == tc_ptr)
1224a513fe19Sbellard             return tb;
1225a513fe19Sbellard         else if (tc_ptr < v) {
1226a513fe19Sbellard             m_max = m - 1;
1227a513fe19Sbellard         } else {
1228a513fe19Sbellard             m_min = m + 1;
1229a513fe19Sbellard         }
1230a513fe19Sbellard     }
1231a513fe19Sbellard     return &tbs[m_max];
1232a513fe19Sbellard }
12337501267eSbellard 
1234ea041c0eSbellard static void tb_reset_jump_recursive(TranslationBlock *tb);
1235ea041c0eSbellard 
/* If jump slot 'n' of 'tb' is chained to another TB, unchain it:
   remove 'tb' from the target's incoming-jump list, repatch the jump
   to fall back to the epilogue, then recursively unchain the target
   itself.  List pointers are tagged in their low 2 bits (0/1 =
   jmp_next slot, 2 = list head). */
static inline void tb_reset_jump_recursive2(TranslationBlock *tb, int n)
{
    TranslationBlock *tb1, *tb_next, **ptb;
    unsigned int n1;

    tb1 = tb->jmp_next[n];
    if (tb1 != NULL) {
        /* find head of list */
        for(;;) {
            n1 = (long)tb1 & 3;
            tb1 = (TranslationBlock *)((long)tb1 & ~3);
            if (n1 == 2)
                break;
            tb1 = tb1->jmp_next[n1];
        }
        /* we are now sure now that tb jumps to tb1 */
        tb_next = tb1;

        /* remove tb from the jmp_first list */
        ptb = &tb_next->jmp_first;
        for(;;) {
            tb1 = *ptb;
            n1 = (long)tb1 & 3;
            tb1 = (TranslationBlock *)((long)tb1 & ~3);
            if (n1 == n && tb1 == tb)
                break;
            ptb = &tb1->jmp_next[n1];
        }
        *ptb = tb->jmp_next[n];
        tb->jmp_next[n] = NULL;

        /* suppress the jump to next tb in generated code */
        tb_reset_jump(tb, n);

        /* suppress jumps in the tb on which we could have jumped */
        tb_reset_jump_recursive(tb_next);
    }
}
1274ea041c0eSbellard 
1275ea041c0eSbellard static void tb_reset_jump_recursive(TranslationBlock *tb)
1276ea041c0eSbellard {
1277ea041c0eSbellard     tb_reset_jump_recursive2(tb, 0);
1278ea041c0eSbellard     tb_reset_jump_recursive2(tb, 1);
1279ea041c0eSbellard }
1280ea041c0eSbellard 
#if defined(TARGET_HAS_ICE)
/* Invalidate the translated code containing guest 'pc' so that the next
   execution retranslates it with the breakpoint check included. */
static void breakpoint_invalidate(CPUState *env, target_ulong pc)
{
    target_phys_addr_t addr;
    target_ulong pd;
    ram_addr_t ram_addr;
    PhysPageDesc *p;

    addr = cpu_get_phys_page_debug(env, pc);
    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    if (!p) {
        pd = IO_MEM_UNASSIGNED;
    } else {
        pd = p->phys_offset;
    }
    /* combine the page's ram offset with the in-page offset of pc */
    ram_addr = (pd & TARGET_PAGE_MASK) | (pc & ~TARGET_PAGE_MASK);
    tb_invalidate_phys_page_range(ram_addr, ram_addr + 1, 0);
}
#endif
1300d720b93dSbellard 
/* Add a watchpoint.  */
/* Insert a watchpoint covering [addr, addr + len).  len must be one of
   1/2/4/8 and addr must be len-aligned.  On success the new watchpoint is
   optionally returned through *watchpoint.  Returns 0 on success or
   -EINVAL for a bad length/alignment. */
int cpu_watchpoint_insert(CPUState *env, target_ulong addr, target_ulong len,
                          int flags, CPUWatchpoint **watchpoint)
{
    target_ulong len_mask = ~(len - 1);
    CPUWatchpoint *wp;

    /* sanity checks: allow power-of-2 lengths, deny unaligned watchpoints */
    if ((len != 1 && len != 2 && len != 4 && len != 8) || (addr & ~len_mask)) {
        fprintf(stderr, "qemu: tried to set invalid watchpoint at "
                TARGET_FMT_lx ", len=" TARGET_FMT_lu "\n", addr, len);
        return -EINVAL;
    }
    /* NOTE(review): no NULL check -- presumably qemu_malloc aborts on OOM;
       confirm against the allocator's contract. */
    wp = qemu_malloc(sizeof(*wp));

    wp->vaddr = addr;
    wp->len_mask = len_mask;
    wp->flags = flags;

    /* keep all GDB-injected watchpoints in front */
    if (flags & BP_GDB)
        TAILQ_INSERT_HEAD(&env->watchpoints, wp, entry);
    else
        TAILQ_INSERT_TAIL(&env->watchpoints, wp, entry);

    /* drop the TLB entry for this page so the next access re-fills it and
       notices the watchpoint */
    tlb_flush_page(env, addr);

    if (watchpoint)
        *watchpoint = wp;
    return 0;
}
13326658ffb8Spbrook 
/* Remove a specific watchpoint.  */
/* Matches on address, length and flags (ignoring the transient
   BP_WATCHPOINT_HIT bit).  Returns 0 if a watchpoint was removed,
   -ENOENT if none matched. */
int cpu_watchpoint_remove(CPUState *env, target_ulong addr, target_ulong len,
                          int flags)
{
    target_ulong len_mask = ~(len - 1);
    CPUWatchpoint *wp;

    TAILQ_FOREACH(wp, &env->watchpoints, entry) {
        if (addr == wp->vaddr && len_mask == wp->len_mask
                && flags == (wp->flags & ~BP_WATCHPOINT_HIT)) {
            cpu_watchpoint_remove_by_ref(env, wp);
            return 0;
        }
    }
    return -ENOENT;
}
13496658ffb8Spbrook 
/* Remove a specific watchpoint by reference.  */
/* Unlinks, flushes the TLB entry for the watched page (so accesses stop
   taking the watchpoint slow path) and frees the watchpoint. */
void cpu_watchpoint_remove_by_ref(CPUState *env, CPUWatchpoint *watchpoint)
{
    TAILQ_REMOVE(&env->watchpoints, watchpoint, entry);

    tlb_flush_page(env, watchpoint->vaddr);

    qemu_free(watchpoint);
}
13597d03f82fSedgar_igl 
/* Remove all matching watchpoints.  */
/* 'mask' selects watchpoints by flag bits (e.g. BP_GDB).  The _SAFE
   iterator is required because entries are unlinked mid-walk. */
void cpu_watchpoint_remove_all(CPUState *env, int mask)
{
    CPUWatchpoint *wp, *next;

    TAILQ_FOREACH_SAFE(wp, &env->watchpoints, entry, next) {
        if (wp->flags & mask)
            cpu_watchpoint_remove_by_ref(env, wp);
    }
}
1370a1d1bb31Saliguori 
/* Add a breakpoint.  */
/* Returns 0 on success (optionally passing the new breakpoint back via
   *breakpoint), or -ENOSYS when the target has no debug (ICE) support. */
int cpu_breakpoint_insert(CPUState *env, target_ulong pc, int flags,
                          CPUBreakpoint **breakpoint)
{
#if defined(TARGET_HAS_ICE)
    CPUBreakpoint *bp;

    /* NOTE(review): no NULL check -- presumably qemu_malloc aborts on OOM */
    bp = qemu_malloc(sizeof(*bp));

    bp->pc = pc;
    bp->flags = flags;

    /* keep all GDB-injected breakpoints in front */
    if (flags & BP_GDB)
        TAILQ_INSERT_HEAD(&env->breakpoints, bp, entry);
    else
        TAILQ_INSERT_TAIL(&env->breakpoints, bp, entry);

    /* force retranslation of the TB containing pc */
    breakpoint_invalidate(env, pc);

    if (breakpoint)
        *breakpoint = bp;
    return 0;
#else
    return -ENOSYS;
#endif
}
13984c3a88a2Sbellard 
/* Remove a specific breakpoint.  */
/* Matches on pc and exact flags.  Returns 0 if removed, -ENOENT if no
   match, -ENOSYS without ICE support. */
int cpu_breakpoint_remove(CPUState *env, target_ulong pc, int flags)
{
#if defined(TARGET_HAS_ICE)
    CPUBreakpoint *bp;

    TAILQ_FOREACH(bp, &env->breakpoints, entry) {
        if (bp->pc == pc && bp->flags == flags) {
            cpu_breakpoint_remove_by_ref(env, bp);
            return 0;
        }
    }
    return -ENOENT;
#else
    return -ENOSYS;
#endif
}
14167d03f82fSedgar_igl 
/* Remove a specific breakpoint by reference.  */
/* Unlinks, invalidates the translated code at the breakpoint's pc and
   frees the breakpoint. */
void cpu_breakpoint_remove_by_ref(CPUState *env, CPUBreakpoint *breakpoint)
{
#if defined(TARGET_HAS_ICE)
    TAILQ_REMOVE(&env->breakpoints, breakpoint, entry);

    breakpoint_invalidate(env, breakpoint->pc);

    qemu_free(breakpoint);
#endif
}
1428a1d1bb31Saliguori 
/* Remove all matching breakpoints. */
/* 'mask' selects breakpoints by flag bits (e.g. BP_GDB).  The _SAFE
   iterator is required because entries are unlinked mid-walk. */
void cpu_breakpoint_remove_all(CPUState *env, int mask)
{
#if defined(TARGET_HAS_ICE)
    CPUBreakpoint *bp, *next;

    TAILQ_FOREACH_SAFE(bp, &env->breakpoints, entry, next) {
        if (bp->flags & mask)
            cpu_breakpoint_remove_by_ref(env, bp);
    }
#endif
}
14414c3a88a2Sbellard 
/* enable or disable single step mode. EXCP_DEBUG is returned by the
   CPU loop after each instruction */
void cpu_single_step(CPUState *env, int enabled)
{
#if defined(TARGET_HAS_ICE)
    if (env->singlestep_enabled != enabled) {
        env->singlestep_enabled = enabled;
        /* must flush all the translated code to avoid inconsistencies */
        /* XXX: only flush what is necessary */
        tb_flush(env);
    }
#endif
}
1455c33a346eSbellard 
/* enable or disable low levels log */
/* Sets the global log mask; lazily opens the log file on first enable
   and closes it when all flags are cleared.  Exits on open failure. */
void cpu_set_log(int log_flags)
{
    loglevel = log_flags;
    if (loglevel && !logfile) {
        logfile = fopen(logfilename, log_append ? "a" : "w");
        if (!logfile) {
            perror(logfilename);
            _exit(1);
        }
#if !defined(CONFIG_SOFTMMU)
        /* must avoid mmap() usage of glibc by setting a buffer "by hand" */
        {
            static char logfile_buf[4096];
            setvbuf(logfile, logfile_buf, _IOLBF, sizeof(logfile_buf));
        }
#else
        setvbuf(logfile, NULL, _IOLBF, 0);
#endif
        /* any subsequent reopen (e.g. after a filename change) appends */
        log_append = 1;
    }
    if (!loglevel && logfile) {
        fclose(logfile);
        logfile = NULL;
    }
}
148234865134Sbellard 
/* Switch the log to a new file name; reopens the log immediately if
   logging is currently enabled. */
void cpu_set_log_filename(const char *filename)
{
    /* NOTE(review): the previous logfilename is never freed; it may point
       at a string-literal default elsewhere in the file, so freeing here
       would be unsafe without ownership tracking -- confirm. */
    logfilename = strdup(filename);
    if (logfile) {
        fclose(logfile);
        logfile = NULL;
    }
    /* re-run with the current mask to reopen under the new name */
    cpu_set_log(loglevel);
}
1492c33a346eSbellard 
/* mask must never be zero, except for A20 change call */
/* Raise the bits in 'mask' on env->interrupt_request and make the CPU
   notice them: under icount, force the instruction counter to expire;
   otherwise unlink the currently executing TB chain so the CPU loop
   regains control. */
void cpu_interrupt(CPUState *env, int mask)
{
#if !defined(USE_NPTL)
    TranslationBlock *tb;
    static spinlock_t interrupt_lock = SPIN_LOCK_UNLOCKED;
#endif
    int old_mask;

    old_mask = env->interrupt_request;
    /* FIXME: This is probably not threadsafe.  A different thread could
       be in the middle of a read-modify-write operation.  */
    env->interrupt_request |= mask;
#if defined(USE_NPTL)
    /* FIXME: TB unchaining isn't SMP safe.  For now just ignore the
       problem and hope the cpu will stop of its own accord.  For userspace
       emulation this often isn't actually as bad as it sounds.  Often
       signals are used primarily to interrupt blocking syscalls.  */
#else
    if (use_icount) {
        /* force the icount budget to expire on the next check */
        env->icount_decr.u16.high = 0xffff;
#ifndef CONFIG_USER_ONLY
        /* CPU_INTERRUPT_EXIT isn't a real interrupt.  It just means
           an async event happened and we need to process it.  */
        if (!can_do_io(env)
            && (mask & ~(old_mask | CPU_INTERRUPT_EXIT)) != 0) {
            cpu_abort(env, "Raised interrupt while not in I/O function");
        }
#endif
    } else {
        tb = env->current_tb;
        /* if the cpu is currently executing code, we must unlink it and
           all the potentially executing TB */
        if (tb && !testandset(&interrupt_lock)) {
            env->current_tb = NULL;
            tb_reset_jump_recursive(tb);
            resetlock(&interrupt_lock);
        }
    }
#endif
}
1534ea041c0eSbellard 
/* Clear the given bits in env->interrupt_request; counterpart of
   cpu_interrupt().  Plain read-modify-write, not atomic. */
void cpu_reset_interrupt(CPUState *env, int mask)
{
    env->interrupt_request &= ~mask;
}
1539b54ad049Sbellard 
/* Table mapping '-d' log category names to CPU_LOG_* mask bits, with a
   help string for each; terminated by a zero-mask entry. */
const CPULogItem cpu_log_items[] = {
    { CPU_LOG_TB_OUT_ASM, "out_asm",
      "show generated host assembly code for each compiled TB" },
    { CPU_LOG_TB_IN_ASM, "in_asm",
      "show target assembly code for each compiled TB" },
    { CPU_LOG_TB_OP, "op",
      "show micro ops for each compiled TB" },
    { CPU_LOG_TB_OP_OPT, "op_opt",
      "show micro ops "
#ifdef TARGET_I386
      "before eflags optimization and "
#endif
      "after liveness analysis" },
    { CPU_LOG_INT, "int",
      "show interrupts/exceptions in short format" },
    { CPU_LOG_EXEC, "exec",
      "show trace before each executed TB (lots of logs)" },
    { CPU_LOG_TB_CPU, "cpu",
      "show CPU state before block translation" },
#ifdef TARGET_I386
    { CPU_LOG_PCALL, "pcall",
      "show protected mode far calls/returns/exceptions" },
    { CPU_LOG_RESET, "cpu_reset",
      "show CPU state before CPU resets" },
#endif
#ifdef DEBUG_IOPORT
    { CPU_LOG_IOPORT, "ioport",
      "show all i/o ports accesses" },
#endif
    { 0, NULL, NULL },   /* terminator */
};
1571f193c797Sbellard 
/* Return nonzero iff the first n bytes of s1 exactly spell out the
   NUL-terminated string s2 (i.e. strlen(s2) == n and the bytes match). */
static int cmp1(const char *s1, int n, const char *s2)
{
    return strlen(s2) == (size_t)n && memcmp(s1, s2, n) == 0;
}
1578f193c797Sbellard 
1579f193c797Sbellard /* takes a comma separated list of log masks. Return 0 if error. */
1580f193c797Sbellard int cpu_str_to_log_mask(const char *str)
1581f193c797Sbellard {
1582c7cd6a37Sblueswir1     const CPULogItem *item;
1583f193c797Sbellard     int mask;
1584f193c797Sbellard     const char *p, *p1;
1585f193c797Sbellard 
1586f193c797Sbellard     p = str;
1587f193c797Sbellard     mask = 0;
1588f193c797Sbellard     for(;;) {
1589f193c797Sbellard         p1 = strchr(p, ',');
1590f193c797Sbellard         if (!p1)
1591f193c797Sbellard             p1 = p + strlen(p);
15928e3a9fd2Sbellard 	if(cmp1(p,p1-p,"all")) {
15938e3a9fd2Sbellard 		for(item = cpu_log_items; item->mask != 0; item++) {
15948e3a9fd2Sbellard 			mask |= item->mask;
15958e3a9fd2Sbellard 		}
15968e3a9fd2Sbellard 	} else {
1597f193c797Sbellard         for(item = cpu_log_items; item->mask != 0; item++) {
1598f193c797Sbellard             if (cmp1(p, p1 - p, item->name))
1599f193c797Sbellard                 goto found;
1600f193c797Sbellard         }
1601f193c797Sbellard         return 0;
16028e3a9fd2Sbellard 	}
1603f193c797Sbellard     found:
1604f193c797Sbellard         mask |= item->mask;
1605f193c797Sbellard         if (*p1 != ',')
1606f193c797Sbellard             break;
1607f193c797Sbellard         p = p1 + 1;
1608f193c797Sbellard     }
1609f193c797Sbellard     return mask;
1610f193c797Sbellard }
1611ea041c0eSbellard 
/* Report a fatal emulator error: print the formatted message and a CPU
   state dump to stderr (and to the log file when logging is enabled),
   then abort().  Never returns. */
void cpu_abort(CPUState *env, const char *fmt, ...)
{
    va_list ap;
    va_list ap2;

    va_start(ap, fmt);
    va_copy(ap2, ap);   /* ap is consumed by stderr, ap2 by the log file */
    fprintf(stderr, "qemu: fatal: ");
    vfprintf(stderr, fmt, ap);
    fprintf(stderr, "\n");
#ifdef TARGET_I386
    cpu_dump_state(env, stderr, fprintf, X86_DUMP_FPU | X86_DUMP_CCOP);
#else
    cpu_dump_state(env, stderr, fprintf, 0);
#endif
    if (qemu_log_enabled()) {
        qemu_log("qemu: fatal: ");
        qemu_log_vprintf(fmt, ap2);
        qemu_log("\n");
#ifdef TARGET_I386
        log_cpu_state(env, X86_DUMP_FPU | X86_DUMP_CCOP);
#else
        log_cpu_state(env, 0);
#endif
        qemu_log_flush();
        qemu_log_close();
    }
    va_end(ap2);
    va_end(ap);
    abort();
}
16437501267eSbellard 
1644c5be9f08Sths CPUState *cpu_copy(CPUState *env)
1645c5be9f08Sths {
164601ba9816Sths     CPUState *new_env = cpu_init(env->cpu_model_str);
1647c5be9f08Sths     CPUState *next_cpu = new_env->next_cpu;
1648c5be9f08Sths     int cpu_index = new_env->cpu_index;
16495a38f081Saliguori #if defined(TARGET_HAS_ICE)
16505a38f081Saliguori     CPUBreakpoint *bp;
16515a38f081Saliguori     CPUWatchpoint *wp;
16525a38f081Saliguori #endif
16535a38f081Saliguori 
1654c5be9f08Sths     memcpy(new_env, env, sizeof(CPUState));
16555a38f081Saliguori 
16565a38f081Saliguori     /* Preserve chaining and index. */
1657c5be9f08Sths     new_env->next_cpu = next_cpu;
1658c5be9f08Sths     new_env->cpu_index = cpu_index;
16595a38f081Saliguori 
16605a38f081Saliguori     /* Clone all break/watchpoints.
16615a38f081Saliguori        Note: Once we support ptrace with hw-debug register access, make sure
16625a38f081Saliguori        BP_CPU break/watchpoints are handled correctly on clone. */
16635a38f081Saliguori     TAILQ_INIT(&env->breakpoints);
16645a38f081Saliguori     TAILQ_INIT(&env->watchpoints);
16655a38f081Saliguori #if defined(TARGET_HAS_ICE)
16665a38f081Saliguori     TAILQ_FOREACH(bp, &env->breakpoints, entry) {
16675a38f081Saliguori         cpu_breakpoint_insert(new_env, bp->pc, bp->flags, NULL);
16685a38f081Saliguori     }
16695a38f081Saliguori     TAILQ_FOREACH(wp, &env->watchpoints, entry) {
16705a38f081Saliguori         cpu_watchpoint_insert(new_env, wp->vaddr, (~wp->len_mask) + 1,
16715a38f081Saliguori                               wp->flags, NULL);
16725a38f081Saliguori     }
16735a38f081Saliguori #endif
16745a38f081Saliguori 
1675c5be9f08Sths     return new_env;
1676c5be9f08Sths }
1677c5be9f08Sths 
16780124311eSbellard #if !defined(CONFIG_USER_ONLY)
16790124311eSbellard 
16805c751e99Sedgar_igl static inline void tlb_flush_jmp_cache(CPUState *env, target_ulong addr)
16815c751e99Sedgar_igl {
16825c751e99Sedgar_igl     unsigned int i;
16835c751e99Sedgar_igl 
16845c751e99Sedgar_igl     /* Discard jump cache entries for any tb which might potentially
16855c751e99Sedgar_igl        overlap the flushed page.  */
16865c751e99Sedgar_igl     i = tb_jmp_cache_hash_page(addr - TARGET_PAGE_SIZE);
16875c751e99Sedgar_igl     memset (&env->tb_jmp_cache[i], 0,
16885c751e99Sedgar_igl 	    TB_JMP_PAGE_SIZE * sizeof(TranslationBlock *));
16895c751e99Sedgar_igl 
16905c751e99Sedgar_igl     i = tb_jmp_cache_hash_page(addr);
16915c751e99Sedgar_igl     memset (&env->tb_jmp_cache[i], 0,
16925c751e99Sedgar_igl 	    TB_JMP_PAGE_SIZE * sizeof(TranslationBlock *));
16935c751e99Sedgar_igl }
16945c751e99Sedgar_igl 
/* NOTE: if flush_global is true, also flush global entries (not
   implemented yet) */
/* Invalidate the entire TLB of 'env' (all entries, all MMU modes) and
   the whole tb_jmp_cache. */
void tlb_flush(CPUState *env, int flush_global)
{
    int i;

#if defined(DEBUG_TLB)
    printf("tlb_flush:\n");
#endif
    /* must reset current TB so that interrupts cannot modify the
       links while we are modifying them */
    env->current_tb = NULL;

    /* -1 can never match a page-aligned lookup address, so it marks the
       entry invalid */
    for(i = 0; i < CPU_TLB_SIZE; i++) {
        env->tlb_table[0][i].addr_read = -1;
        env->tlb_table[0][i].addr_write = -1;
        env->tlb_table[0][i].addr_code = -1;
        env->tlb_table[1][i].addr_read = -1;
        env->tlb_table[1][i].addr_write = -1;
        env->tlb_table[1][i].addr_code = -1;
#if (NB_MMU_MODES >= 3)
        env->tlb_table[2][i].addr_read = -1;
        env->tlb_table[2][i].addr_write = -1;
        env->tlb_table[2][i].addr_code = -1;
#if (NB_MMU_MODES == 4)
        env->tlb_table[3][i].addr_read = -1;
        env->tlb_table[3][i].addr_write = -1;
        env->tlb_table[3][i].addr_code = -1;
#endif
#endif
    }

    memset (env->tb_jmp_cache, 0, TB_JMP_CACHE_SIZE * sizeof (void *));

#ifdef USE_KQEMU
    if (env->kqemu_enabled) {
        kqemu_flush(env, flush_global);
    }
#endif
    tlb_flush_count++;
}
173633417e70Sbellard 
1737274da6b2Sbellard static inline void tlb_flush_entry(CPUTLBEntry *tlb_entry, target_ulong addr)
173861382a50Sbellard {
173984b7b8e7Sbellard     if (addr == (tlb_entry->addr_read &
174084b7b8e7Sbellard                  (TARGET_PAGE_MASK | TLB_INVALID_MASK)) ||
174184b7b8e7Sbellard         addr == (tlb_entry->addr_write &
174284b7b8e7Sbellard                  (TARGET_PAGE_MASK | TLB_INVALID_MASK)) ||
174384b7b8e7Sbellard         addr == (tlb_entry->addr_code &
174484b7b8e7Sbellard                  (TARGET_PAGE_MASK | TLB_INVALID_MASK))) {
174584b7b8e7Sbellard         tlb_entry->addr_read = -1;
174684b7b8e7Sbellard         tlb_entry->addr_write = -1;
174784b7b8e7Sbellard         tlb_entry->addr_code = -1;
174884b7b8e7Sbellard     }
174961382a50Sbellard }
175061382a50Sbellard 
/* Invalidate the TLB entries covering virtual page 'addr' in every MMU
   mode, plus the related tb_jmp_cache buckets. */
void tlb_flush_page(CPUState *env, target_ulong addr)
{
    int i;

#if defined(DEBUG_TLB)
    printf("tlb_flush_page: " TARGET_FMT_lx "\n", addr);
#endif
    /* must reset current TB so that interrupts cannot modify the
       links while we are modifying them */
    env->current_tb = NULL;

    addr &= TARGET_PAGE_MASK;
    /* the entry for a given page sits at the same index in every mode */
    i = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
    tlb_flush_entry(&env->tlb_table[0][i], addr);
    tlb_flush_entry(&env->tlb_table[1][i], addr);
#if (NB_MMU_MODES >= 3)
    tlb_flush_entry(&env->tlb_table[2][i], addr);
#if (NB_MMU_MODES == 4)
    tlb_flush_entry(&env->tlb_table[3][i], addr);
#endif
#endif

    tlb_flush_jmp_cache(env, addr);

#ifdef USE_KQEMU
    if (env->kqemu_enabled) {
        kqemu_flush_page(env, addr);
    }
#endif
}
17819fa3e853Sbellard 
/* update the TLBs so that writes to code in the virtual page 'addr'
   can be detected */
static void tlb_protect_code(ram_addr_t ram_addr)
{
    /* clearing CODE_DIRTY_FLAG routes future writes to this page through
       the dirty-tracking slow path, where self-modifying code is caught */
    cpu_physical_memory_reset_dirty(ram_addr,
                                    ram_addr + TARGET_PAGE_SIZE,
                                    CODE_DIRTY_FLAG);
}
17909fa3e853Sbellard 
/* update the TLB so that writes in physical page 'phys_addr' are no longer
   tested for self modifying code */
static void tlb_unprotect_code_phys(CPUState *env, ram_addr_t ram_addr,
                                    target_ulong vaddr)
{
    /* setting CODE_DIRTY_FLAG disables the write traps for this page;
       'env' and 'vaddr' are unused here. */
    phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] |= CODE_DIRTY_FLAG;
}
17989fa3e853Sbellard 
17991ccde1cbSbellard static inline void tlb_reset_dirty_range(CPUTLBEntry *tlb_entry,
18001ccde1cbSbellard                                          unsigned long start, unsigned long length)
18011ccde1cbSbellard {
18021ccde1cbSbellard     unsigned long addr;
180384b7b8e7Sbellard     if ((tlb_entry->addr_write & ~TARGET_PAGE_MASK) == IO_MEM_RAM) {
180484b7b8e7Sbellard         addr = (tlb_entry->addr_write & TARGET_PAGE_MASK) + tlb_entry->addend;
18051ccde1cbSbellard         if ((addr - start) < length) {
18060f459d16Spbrook             tlb_entry->addr_write = (tlb_entry->addr_write & TARGET_PAGE_MASK) | TLB_NOTDIRTY;
18071ccde1cbSbellard         }
18081ccde1cbSbellard     }
18091ccde1cbSbellard }
18101ccde1cbSbellard 
/* Clear the given dirty_flags bits for RAM range [start, end) and make
   every CPU's TLB take the dirty-tracking write path again for the
   affected pages. */
void cpu_physical_memory_reset_dirty(ram_addr_t start, ram_addr_t end,
                                     int dirty_flags)
{
    CPUState *env;
    unsigned long length, start1;
    int i, mask, len;
    uint8_t *p;

    /* round the range out to whole pages */
    start &= TARGET_PAGE_MASK;
    end = TARGET_PAGE_ALIGN(end);

    length = end - start;
    if (length == 0)
        return;
    len = length >> TARGET_PAGE_BITS;
#ifdef USE_KQEMU
    /* XXX: should not depend on cpu context */
    env = first_cpu;
    if (env->kqemu_enabled) {
        ram_addr_t addr;
        addr = start;
        for(i = 0; i < len; i++) {
            kqemu_set_notdirty(env, addr);
            addr += TARGET_PAGE_SIZE;
        }
    }
#endif
    /* clear the requested flag bits in each page's dirty byte */
    mask = ~dirty_flags;
    p = phys_ram_dirty + (start >> TARGET_PAGE_BITS);
    for(i = 0; i < len; i++)
        p[i] &= mask;

    /* we modify the TLB cache so that the dirty bit will be set again
       when accessing the range */
    /* start1 is the host address corresponding to guest ram offset start */
    start1 = start + (unsigned long)phys_ram_base;
    for(env = first_cpu; env != NULL; env = env->next_cpu) {
        for(i = 0; i < CPU_TLB_SIZE; i++)
            tlb_reset_dirty_range(&env->tlb_table[0][i], start1, length);
        for(i = 0; i < CPU_TLB_SIZE; i++)
            tlb_reset_dirty_range(&env->tlb_table[1][i], start1, length);
#if (NB_MMU_MODES >= 3)
        for(i = 0; i < CPU_TLB_SIZE; i++)
            tlb_reset_dirty_range(&env->tlb_table[2][i], start1, length);
#if (NB_MMU_MODES == 4)
        for(i = 0; i < CPU_TLB_SIZE; i++)
            tlb_reset_dirty_range(&env->tlb_table[3][i], start1, length);
#endif
#endif
    }
}
18611ccde1cbSbellard 
/* Enable or disable dirty-memory tracking (used while migrating).
   Always succeeds and returns 0. */
int cpu_physical_memory_set_dirty_tracking(int enable)
{
    in_migration = enable;
    return 0;
}
186774576198Saliguori 
/* Return nonzero if dirty-memory tracking is currently enabled. */
int cpu_physical_memory_get_dirty_tracking(void)
{
    return in_migration;
}
187274576198Saliguori 
/* Pull the dirty bitmap for [start_addr, end_addr] from the hypervisor;
   a no-op unless KVM is active. */
void cpu_physical_sync_dirty_bitmap(target_phys_addr_t start_addr, target_phys_addr_t end_addr)
{
    if (kvm_enabled())
        kvm_physical_sync_dirty_bitmap(start_addr, end_addr);
}
18782bec46dcSaliguori 
18793a7d929eSbellard static inline void tlb_update_dirty(CPUTLBEntry *tlb_entry)
18803a7d929eSbellard {
18813a7d929eSbellard     ram_addr_t ram_addr;
18823a7d929eSbellard 
188384b7b8e7Sbellard     if ((tlb_entry->addr_write & ~TARGET_PAGE_MASK) == IO_MEM_RAM) {
188484b7b8e7Sbellard         ram_addr = (tlb_entry->addr_write & TARGET_PAGE_MASK) +
18853a7d929eSbellard             tlb_entry->addend - (unsigned long)phys_ram_base;
18863a7d929eSbellard         if (!cpu_physical_memory_is_dirty(ram_addr)) {
18870f459d16Spbrook             tlb_entry->addr_write |= TLB_NOTDIRTY;
18883a7d929eSbellard         }
18893a7d929eSbellard     }
18903a7d929eSbellard }
18913a7d929eSbellard 
/* update the TLB according to the current state of the dirty bits */
/* Walks every entry of every MMU mode. */
void cpu_tlb_update_dirty(CPUState *env)
{
    int i;
    for(i = 0; i < CPU_TLB_SIZE; i++)
        tlb_update_dirty(&env->tlb_table[0][i]);
    for(i = 0; i < CPU_TLB_SIZE; i++)
        tlb_update_dirty(&env->tlb_table[1][i]);
#if (NB_MMU_MODES >= 3)
    for(i = 0; i < CPU_TLB_SIZE; i++)
        tlb_update_dirty(&env->tlb_table[2][i]);
#if (NB_MMU_MODES == 4)
    for(i = 0; i < CPU_TLB_SIZE; i++)
        tlb_update_dirty(&env->tlb_table[3][i]);
#endif
#endif
}
19093a7d929eSbellard 
/* If the entry was routed through the NOTDIRTY slow path for 'vaddr',
   restore the fast direct-write address. */
static inline void tlb_set_dirty1(CPUTLBEntry *tlb_entry, target_ulong vaddr)
{
    if (tlb_entry->addr_write == (vaddr | TLB_NOTDIRTY))
        tlb_entry->addr_write = vaddr;
}
19151ccde1cbSbellard 
/* update the TLB corresponding to virtual page vaddr
   so that it is no longer dirty */
static inline void tlb_set_dirty(CPUState *env, target_ulong vaddr)
{
    int i;

    vaddr &= TARGET_PAGE_MASK;
    /* the entry for a given page sits at the same index in every mode */
    i = (vaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
    tlb_set_dirty1(&env->tlb_table[0][i], vaddr);
    tlb_set_dirty1(&env->tlb_table[1][i], vaddr);
#if (NB_MMU_MODES >= 3)
    tlb_set_dirty1(&env->tlb_table[2][i], vaddr);
#if (NB_MMU_MODES == 4)
    tlb_set_dirty1(&env->tlb_table[3][i], vaddr);
#endif
#endif
}
19331ccde1cbSbellard 
/* add a new TLB entry. At most one entry for a given virtual address
   is permitted. Return 0 if OK or 2 if the page could not be mapped
   (can only happen in non SOFTMMU mode for I/O pages or pages
   conflicting with the host address space). */
int tlb_set_page_exec(CPUState *env, target_ulong vaddr,
                      target_phys_addr_t paddr, int prot,
                      int mmu_idx, int is_softmmu)
{
    PhysPageDesc *p;
    unsigned long pd;
    unsigned int index;
    target_ulong address;
    target_ulong code_address;
    target_phys_addr_t addend;
    int ret;
    CPUTLBEntry *te;
    CPUWatchpoint *wp;
    target_phys_addr_t iotlb;

    /* look up the physical page descriptor; unmapped pages are treated
       as unassigned I/O memory */
    p = phys_page_find(paddr >> TARGET_PAGE_BITS);
    if (!p) {
        pd = IO_MEM_UNASSIGNED;
    } else {
        pd = p->phys_offset;
    }
#if defined(DEBUG_TLB)
    printf("tlb_set_page: vaddr=" TARGET_FMT_lx " paddr=0x%08x prot=%x idx=%d smmu=%d pd=0x%08lx\n",
           vaddr, (int)paddr, prot, mmu_idx, is_softmmu, pd);
#endif

    ret = 0;
    address = vaddr;
    if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM && !(pd & IO_MEM_ROMD)) {
        /* IO memory case (romd handled later) */
        address |= TLB_MMIO;
    }
    /* host address corresponding to the start of the guest page */
    addend = (unsigned long)phys_ram_base + (pd & TARGET_PAGE_MASK);
    if ((pd & ~TARGET_PAGE_MASK) <= IO_MEM_ROM) {
        /* Normal RAM.  */
        iotlb = pd & TARGET_PAGE_MASK;
        if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_RAM)
            iotlb |= IO_MEM_NOTDIRTY;
        else
            iotlb |= IO_MEM_ROM;
    } else {
        /* IO handlers are currently passed a physical address.
           It would be nice to pass an offset from the base address
           of that region.  This would avoid having to special case RAM,
           and avoid full address decoding in every device.
           We can't use the high bits of pd for this because
           IO_MEM_ROMD uses these as a ram address.  */
        iotlb = (pd & ~TARGET_PAGE_MASK);
        if (p) {
            iotlb += p->region_offset;
        } else {
            iotlb += paddr;
        }
    }

    code_address = address;
    /* Make accesses to pages with watchpoints go via the
       watchpoint trap routines.  */
    TAILQ_FOREACH(wp, &env->watchpoints, entry) {
        if (vaddr == (wp->vaddr & TARGET_PAGE_MASK)) {
            iotlb = io_mem_watch + paddr;
            /* TODO: The memory case can be optimized by not trapping
               reads of pages with a write breakpoint.  */
            address |= TLB_MMIO;
        }
    }

    /* fill the TLB slot for this address in the requested MMU mode;
       addend/iotlb are stored relative to vaddr for fast lookup */
    index = (vaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
    env->iotlb[mmu_idx][index] = iotlb - vaddr;
    te = &env->tlb_table[mmu_idx][index];
    te->addend = addend - vaddr;
    if (prot & PAGE_READ) {
        te->addr_read = address;
    } else {
        te->addr_read = -1;
    }

    if (prot & PAGE_EXEC) {
        te->addr_code = code_address;
    } else {
        te->addr_code = -1;
    }
    if (prot & PAGE_WRITE) {
        if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_ROM ||
            (pd & IO_MEM_ROMD)) {
            /* Write access calls the I/O callback.  */
            te->addr_write = address | TLB_MMIO;
        } else if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_RAM &&
                   !cpu_physical_memory_is_dirty(pd)) {
            /* clean RAM page: trap the first write to track dirtying */
            te->addr_write = address | TLB_NOTDIRTY;
        } else {
            te->addr_write = address;
        }
    } else {
        te->addr_write = -1;
    }
    return ret;
}
20369fa3e853Sbellard 
20370124311eSbellard #else
20380124311eSbellard 
/* CONFIG_USER_ONLY: there is no softmmu TLB, so flushing is a no-op */
void tlb_flush(CPUState *env, int flush_global)
{
}
20420124311eSbellard 
/* CONFIG_USER_ONLY: no TLB to invalidate for a single page */
void tlb_flush_page(CPUState *env, target_ulong addr)
{
}
20460124311eSbellard 
/* CONFIG_USER_ONLY: nothing to map; always report success */
int tlb_set_page_exec(CPUState *env, target_ulong vaddr,
                      target_phys_addr_t paddr, int prot,
                      int mmu_idx, int is_softmmu)
{
    return 0;
}
205333417e70Sbellard 
/* dump memory mappings */
void page_dump(FILE *f)
{
    unsigned long start, end;
    int i, j, prot, prot1;
    PageDesc *p;

    fprintf(f, "%-8s %-8s %-8s %s\n",
            "start", "end", "size", "prot");
    /* Run-length encode consecutive pages with identical protection.
       start == -1 means "no open run".  The loop goes one step past
       L1_SIZE (with p = NULL) so the final run is flushed.
       NOTE(review): address reconstruction uses (32 - L1_BITS), which
       assumes a 32-bit guest address space — confirm for this build. */
    start = -1;
    end = -1;
    prot = 0;
    for(i = 0; i <= L1_SIZE; i++) {
        if (i < L1_SIZE)
            p = l1_map[i];
        else
            p = NULL;
        for(j = 0;j < L2_SIZE; j++) {
            if (!p)
                prot1 = 0;
            else
                prot1 = p[j].flags;
            if (prot1 != prot) {
                /* protection changed: close and print the open run */
                end = (i << (32 - L1_BITS)) | (j << TARGET_PAGE_BITS);
                if (start != -1) {
                    fprintf(f, "%08lx-%08lx %08lx %c%c%c\n",
                            start, end, end - start,
                            prot & PAGE_READ ? 'r' : '-',
                            prot & PAGE_WRITE ? 'w' : '-',
                            prot & PAGE_EXEC ? 'x' : '-');
                }
                if (prot1 != 0)
                    start = end;
                else
                    start = -1;
                prot = prot1;
            }
            if (!p)
                break;
        }
    }
}
20969fa3e853Sbellard 
209753a5960aSpbrook int page_get_flags(target_ulong address)
20989fa3e853Sbellard {
20999fa3e853Sbellard     PageDesc *p;
21009fa3e853Sbellard 
21019fa3e853Sbellard     p = page_find(address >> TARGET_PAGE_BITS);
21029fa3e853Sbellard     if (!p)
21039fa3e853Sbellard         return 0;
21049fa3e853Sbellard     return p->flags;
21059fa3e853Sbellard }
21069fa3e853Sbellard 
/* modify the flags of a page and invalidate the code if
   necessary. The flag PAGE_WRITE_ORG is positionned automatically
   depending on PAGE_WRITE */
void page_set_flags(target_ulong start, target_ulong end, int flags)
{
    PageDesc *p;
    target_ulong addr;

    /* mmap_lock should already be held.  */
    start = start & TARGET_PAGE_MASK;
    end = TARGET_PAGE_ALIGN(end);
    if (flags & PAGE_WRITE)
        flags |= PAGE_WRITE_ORG;
    for(addr = start; addr < end; addr += TARGET_PAGE_SIZE) {
        p = page_find_alloc(addr >> TARGET_PAGE_BITS);
        /* We may be called for host regions that are outside guest
           address space.  */
        if (!p)
            return;
        /* if the write protection is set, then we invalidate the code
           inside */
        if (!(p->flags & PAGE_WRITE) &&
            (flags & PAGE_WRITE) &&
            p->first_tb) {
            tb_invalidate_phys_page(addr, 0, NULL);
        }
        p->flags = flags;
    }
}
21369fa3e853Sbellard 
21373d97b40bSths int page_check_range(target_ulong start, target_ulong len, int flags)
21383d97b40bSths {
21393d97b40bSths     PageDesc *p;
21403d97b40bSths     target_ulong end;
21413d97b40bSths     target_ulong addr;
21423d97b40bSths 
214355f280c9Sbalrog     if (start + len < start)
214455f280c9Sbalrog         /* we've wrapped around */
214555f280c9Sbalrog         return -1;
214655f280c9Sbalrog 
21473d97b40bSths     end = TARGET_PAGE_ALIGN(start+len); /* must do before we loose bits in the next step */
21483d97b40bSths     start = start & TARGET_PAGE_MASK;
21493d97b40bSths 
21503d97b40bSths     for(addr = start; addr < end; addr += TARGET_PAGE_SIZE) {
21513d97b40bSths         p = page_find(addr >> TARGET_PAGE_BITS);
21523d97b40bSths         if( !p )
21533d97b40bSths             return -1;
21543d97b40bSths         if( !(p->flags & PAGE_VALID) )
21553d97b40bSths             return -1;
21563d97b40bSths 
2157dae3270cSbellard         if ((flags & PAGE_READ) && !(p->flags & PAGE_READ))
21583d97b40bSths             return -1;
2159dae3270cSbellard         if (flags & PAGE_WRITE) {
2160dae3270cSbellard             if (!(p->flags & PAGE_WRITE_ORG))
21613d97b40bSths                 return -1;
2162dae3270cSbellard             /* unprotect the page if it was put read-only because it
2163dae3270cSbellard                contains translated code */
2164dae3270cSbellard             if (!(p->flags & PAGE_WRITE)) {
2165dae3270cSbellard                 if (!page_unprotect(addr, 0, NULL))
2166dae3270cSbellard                     return -1;
2167dae3270cSbellard             }
2168dae3270cSbellard             return 0;
2169dae3270cSbellard         }
21703d97b40bSths     }
21713d97b40bSths     return 0;
21723d97b40bSths }
21733d97b40bSths 
/* called from signal handler: invalidate the code and unprotect the
   page. Return TRUE if the fault was successfully handled. */
int page_unprotect(target_ulong address, unsigned long pc, void *puc)
{
    unsigned int page_index, prot, pindex;
    PageDesc *p, *p1;
    target_ulong host_start, host_end, addr;

    /* Technically this isn't safe inside a signal handler.  However we
       know this only ever happens in a synchronous SEGV handler, so in
       practice it seems to be ok.  */
    mmap_lock();

    host_start = address & qemu_host_page_mask;
    page_index = host_start >> TARGET_PAGE_BITS;
    p1 = page_find(page_index);
    if (!p1) {
        mmap_unlock();
        return 0;
    }
    host_end = host_start + qemu_host_page_size;
    p = p1;
    prot = 0;
    /* a host page may span several target pages: OR together the flags
       of every target page it covers */
    for(addr = host_start;addr < host_end; addr += TARGET_PAGE_SIZE) {
        prot |= p->flags;
        p++;
    }
    /* if the page was really writable, then we change its
       protection back to writable */
    if (prot & PAGE_WRITE_ORG) {
        pindex = (address - host_start) >> TARGET_PAGE_BITS;
        if (!(p1[pindex].flags & PAGE_WRITE)) {
            mprotect((void *)g2h(host_start), qemu_host_page_size,
                     (prot & PAGE_BITS) | PAGE_WRITE);
            p1[pindex].flags |= PAGE_WRITE;
            /* and since the content will be modified, we must invalidate
               the corresponding translated code. */
            tb_invalidate_phys_page(address, pc, puc);
#ifdef DEBUG_TB_CHECK
            tb_invalidate_check(address);
#endif
            mmap_unlock();
            return 1;
        }
    }
    mmap_unlock();
    return 0;
}
22229fa3e853Sbellard 
/* CONFIG_USER_ONLY: no TLB, so there is no dirty state to update */
static inline void tlb_set_dirty(CPUState *env,
                                 unsigned long addr, target_ulong vaddr)
{
}
22279fa3e853Sbellard #endif /* defined(CONFIG_USER_ONLY) */
222833417e70Sbellard 
2229e2eef170Spbrook #if !defined(CONFIG_USER_ONLY)
22308da3ff18Spbrook 
static int subpage_register (subpage_t *mmio, uint32_t start, uint32_t end,
                             ram_addr_t memory, ram_addr_t region_offset);
static void *subpage_init (target_phys_addr_t base, ram_addr_t *phys,
                           ram_addr_t orig_memory, ram_addr_t region_offset);
/* Compute the sub-page span [start_addr2, end_addr2] of the page at
   'addr' that lies inside [start_addr, start_addr + orig_size), and set
   need_subpage = 1 if the registration does not cover the whole page.
   NOTE(review): this macro silently reads 'orig_size' from the calling
   scope even though it is not a parameter, and writes its start_addr2 /
   end_addr2 / need_subpage arguments — use only where those names exist. */
#define CHECK_SUBPAGE(addr, start_addr, start_addr2, end_addr, end_addr2, \
                      need_subpage)                                     \
    do {                                                                \
        if (addr > start_addr)                                          \
            start_addr2 = 0;                                            \
        else {                                                          \
            start_addr2 = start_addr & ~TARGET_PAGE_MASK;               \
            if (start_addr2 > 0)                                        \
                need_subpage = 1;                                       \
        }                                                               \
                                                                        \
        if ((start_addr + orig_size) - addr >= TARGET_PAGE_SIZE)        \
            end_addr2 = TARGET_PAGE_SIZE - 1;                           \
        else {                                                          \
            end_addr2 = (start_addr + orig_size - 1) & ~TARGET_PAGE_MASK; \
            if (end_addr2 < TARGET_PAGE_SIZE - 1)                       \
                need_subpage = 1;                                       \
        }                                                               \
    } while (0)
2254db7b5426Sblueswir1 
/* register physical memory. 'size' must be a multiple of the target
   page size. If (phys_offset & ~TARGET_PAGE_MASK) != 0, then it is an
   io memory page.  The address used when calling the IO function is
   the offset from the start of the region, plus region_offset.  Both
   start_region and region_offset are rounded down to a page boundary
   before calculating this offset.  This should not be a problem unless
   the low bits of start_addr and region_offset differ.  */
void cpu_register_physical_memory_offset(target_phys_addr_t start_addr,
                                         ram_addr_t size,
                                         ram_addr_t phys_offset,
                                         ram_addr_t region_offset)
{
    target_phys_addr_t addr, end_addr;
    PhysPageDesc *p;
    CPUState *env;
    ram_addr_t orig_size = size;    /* also read by CHECK_SUBPAGE below */
    void *subpage;

#ifdef USE_KQEMU
    /* XXX: should not depend on cpu context */
    env = first_cpu;
    if (env->kqemu_enabled) {
        kqemu_set_phys_mem(start_addr, size, phys_offset);
    }
#endif
    if (kvm_enabled())
        kvm_set_phys_mem(start_addr, size, phys_offset);

    region_offset &= TARGET_PAGE_MASK;
    size = (size + TARGET_PAGE_SIZE - 1) & TARGET_PAGE_MASK;
    end_addr = start_addr + (target_phys_addr_t)size;
    for(addr = start_addr; addr != end_addr; addr += TARGET_PAGE_SIZE) {
        p = phys_page_find(addr >> TARGET_PAGE_BITS);
        if (p && p->phys_offset != IO_MEM_UNASSIGNED) {
            /* page already mapped: may need to split it into a subpage
               if the new registration only partially covers it */
            ram_addr_t orig_memory = p->phys_offset;
            target_phys_addr_t start_addr2, end_addr2;
            int need_subpage = 0;

            CHECK_SUBPAGE(addr, start_addr, start_addr2, end_addr, end_addr2,
                          need_subpage);
            if (need_subpage || phys_offset & IO_MEM_SUBWIDTH) {
                if (!(orig_memory & IO_MEM_SUBPAGE)) {
                    subpage = subpage_init((addr & TARGET_PAGE_MASK),
                                           &p->phys_offset, orig_memory,
                                           p->region_offset);
                } else {
                    /* page is already a subpage: reuse its container */
                    subpage = io_mem_opaque[(orig_memory & ~TARGET_PAGE_MASK)
                                            >> IO_MEM_SHIFT];
                }
                subpage_register(subpage, start_addr2, end_addr2, phys_offset,
                                 region_offset);
                p->region_offset = 0;
            } else {
                p->phys_offset = phys_offset;
                /* RAM/ROM pages advance the backing offset page by page */
                if ((phys_offset & ~TARGET_PAGE_MASK) <= IO_MEM_ROM ||
                    (phys_offset & IO_MEM_ROMD))
                    phys_offset += TARGET_PAGE_SIZE;
            }
        } else {
            /* previously unmapped page: allocate its descriptor */
            p = phys_page_find_alloc(addr >> TARGET_PAGE_BITS, 1);
            p->phys_offset = phys_offset;
            p->region_offset = region_offset;
            if ((phys_offset & ~TARGET_PAGE_MASK) <= IO_MEM_ROM ||
                (phys_offset & IO_MEM_ROMD)) {
                phys_offset += TARGET_PAGE_SIZE;
            } else {
                target_phys_addr_t start_addr2, end_addr2;
                int need_subpage = 0;

                CHECK_SUBPAGE(addr, start_addr, start_addr2, end_addr,
                              end_addr2, need_subpage);

                if (need_subpage || phys_offset & IO_MEM_SUBWIDTH) {
                    subpage = subpage_init((addr & TARGET_PAGE_MASK),
                                           &p->phys_offset, IO_MEM_UNASSIGNED,
                                           0);
                    subpage_register(subpage, start_addr2, end_addr2,
                                     phys_offset, region_offset);
                    p->region_offset = 0;
                }
            }
        }
        region_offset += TARGET_PAGE_SIZE;
    }

    /* since each CPU stores ram addresses in its TLB cache, we must
       reset the modified entries */
    /* XXX: slow ! */
    for(env = first_cpu; env != NULL; env = env->next_cpu) {
        tlb_flush(env, 1);
    }
}
234733417e70Sbellard 
2348ba863458Sbellard /* XXX: temporary until new memory mapping API */
234900f82b8aSaurel32 ram_addr_t cpu_get_physical_page_desc(target_phys_addr_t addr)
2350ba863458Sbellard {
2351ba863458Sbellard     PhysPageDesc *p;
2352ba863458Sbellard 
2353ba863458Sbellard     p = phys_page_find(addr >> TARGET_PAGE_BITS);
2354ba863458Sbellard     if (!p)
2355ba863458Sbellard         return IO_MEM_UNASSIGNED;
2356ba863458Sbellard     return p->phys_offset;
2357ba863458Sbellard }
2358ba863458Sbellard 
/* Tell KVM (when active) to coalesce MMIO writes for this region */
void qemu_register_coalesced_mmio(target_phys_addr_t addr, ram_addr_t size)
{
    if (kvm_enabled())
        kvm_coalesce_mmio_region(addr, size);
}
2364f65ed4c1Saliguori 
/* Undo qemu_register_coalesced_mmio() for this region (KVM only) */
void qemu_unregister_coalesced_mmio(target_phys_addr_t addr, ram_addr_t size)
{
    if (kvm_enabled())
        kvm_uncoalesce_mmio_region(addr, size);
}
2370f65ed4c1Saliguori 
2371e9a1ab19Sbellard /* XXX: better than nothing */
237200f82b8aSaurel32 ram_addr_t qemu_ram_alloc(ram_addr_t size)
2373e9a1ab19Sbellard {
2374e9a1ab19Sbellard     ram_addr_t addr;
23757fb4fdcfSbalrog     if ((phys_ram_alloc_offset + size) > phys_ram_size) {
2376012a7045Sths         fprintf(stderr, "Not enough memory (requested_size = %" PRIu64 ", max memory = %" PRIu64 ")\n",
2377ed441467Sbellard                 (uint64_t)size, (uint64_t)phys_ram_size);
2378e9a1ab19Sbellard         abort();
2379e9a1ab19Sbellard     }
2380e9a1ab19Sbellard     addr = phys_ram_alloc_offset;
2381e9a1ab19Sbellard     phys_ram_alloc_offset = TARGET_PAGE_ALIGN(phys_ram_alloc_offset + size);
2382e9a1ab19Sbellard     return addr;
2383e9a1ab19Sbellard }
2384e9a1ab19Sbellard 
/* Intentionally a no-op: the bump allocator in qemu_ram_alloc()
   cannot reclaim memory */
void qemu_ram_free(ram_addr_t addr)
{
}
2388e9a1ab19Sbellard 
/* Byte read from unassigned physical memory: optionally logged, lets
   the target raise an access fault (sparc), otherwise reads as 0. */
static uint32_t unassigned_mem_readb(void *opaque, target_phys_addr_t addr)
{
#ifdef DEBUG_UNASSIGNED
    printf("Unassigned mem read " TARGET_FMT_plx "\n", addr);
#endif
#if defined(TARGET_SPARC)
    do_unassigned_access(addr, 0, 0, 0, 1);
#endif
    return 0;
}
2399e18231a3Sblueswir1 
/* 16-bit read from unassigned physical memory (see unassigned_mem_readb) */
static uint32_t unassigned_mem_readw(void *opaque, target_phys_addr_t addr)
{
#ifdef DEBUG_UNASSIGNED
    printf("Unassigned mem read " TARGET_FMT_plx "\n", addr);
#endif
#if defined(TARGET_SPARC)
    do_unassigned_access(addr, 0, 0, 0, 2);
#endif
    return 0;
}
2410e18231a3Sblueswir1 
/* 32-bit read from unassigned physical memory (see unassigned_mem_readb) */
static uint32_t unassigned_mem_readl(void *opaque, target_phys_addr_t addr)
{
#ifdef DEBUG_UNASSIGNED
    printf("Unassigned mem read " TARGET_FMT_plx "\n", addr);
#endif
#if defined(TARGET_SPARC)
    do_unassigned_access(addr, 0, 0, 0, 4);
#endif
    return 0;
}
242133417e70Sbellard 
/* Byte write to unassigned physical memory: optionally logged, lets
   the target raise an access fault (sparc), otherwise discarded. */
static void unassigned_mem_writeb(void *opaque, target_phys_addr_t addr, uint32_t val)
{
#ifdef DEBUG_UNASSIGNED
    printf("Unassigned mem write " TARGET_FMT_plx " = 0x%x\n", addr, val);
#endif
#if defined(TARGET_SPARC)
    do_unassigned_access(addr, 1, 0, 0, 1);
#endif
}
2431e18231a3Sblueswir1 
/* 16-bit write to unassigned physical memory (see unassigned_mem_writeb) */
static void unassigned_mem_writew(void *opaque, target_phys_addr_t addr, uint32_t val)
{
#ifdef DEBUG_UNASSIGNED
    printf("Unassigned mem write " TARGET_FMT_plx " = 0x%x\n", addr, val);
#endif
#if defined(TARGET_SPARC)
    do_unassigned_access(addr, 1, 0, 0, 2);
#endif
}
2441e18231a3Sblueswir1 
/* 32-bit write to unassigned physical memory (see unassigned_mem_writeb) */
static void unassigned_mem_writel(void *opaque, target_phys_addr_t addr, uint32_t val)
{
#ifdef DEBUG_UNASSIGNED
    printf("Unassigned mem write " TARGET_FMT_plx " = 0x%x\n", addr, val);
#endif
#if defined(TARGET_SPARC)
    do_unassigned_access(addr, 1, 0, 0, 4);
#endif
}
245133417e70Sbellard 
/* read handlers for unassigned memory, indexed by log2(access size) */
static CPUReadMemoryFunc *unassigned_mem_read[3] = {
    unassigned_mem_readb,
    unassigned_mem_readw,
    unassigned_mem_readl,
};
245733417e70Sbellard 
/* write handlers for unassigned memory, indexed by log2(access size) */
static CPUWriteMemoryFunc *unassigned_mem_write[3] = {
    unassigned_mem_writeb,
    unassigned_mem_writew,
    unassigned_mem_writel,
};
246333417e70Sbellard 
/* Byte write to a RAM page that is still flagged clean (IO_MEM_NOTDIRTY):
   invalidate any translated code on the page, perform the store, mark
   the page dirty, and once fully dirty restore the fast write path. */
static void notdirty_mem_writeb(void *opaque, target_phys_addr_t ram_addr,
                                uint32_t val)
{
    int dirty_flags;
    dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
    if (!(dirty_flags & CODE_DIRTY_FLAG)) {
        /* page may hold translated code: invalidate it before writing */
#if !defined(CONFIG_USER_ONLY)
        tb_invalidate_phys_page_fast(ram_addr, 1);
        dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
#endif
    }
    stb_p(phys_ram_base + ram_addr, val);
#ifdef USE_KQEMU
    if (cpu_single_env->kqemu_enabled &&
        (dirty_flags & KQEMU_MODIFY_PAGE_MASK) != KQEMU_MODIFY_PAGE_MASK)
        kqemu_modify_page(cpu_single_env, ram_addr);
#endif
    dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
    phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] = dirty_flags;
    /* we remove the notdirty callback only if the code has been
       flushed */
    if (dirty_flags == 0xff)
        tlb_set_dirty(cpu_single_env, cpu_single_env->mem_io_vaddr);
}
24881ccde1cbSbellard 
/* 16-bit variant of notdirty_mem_writeb (same dirty-tracking logic) */
static void notdirty_mem_writew(void *opaque, target_phys_addr_t ram_addr,
                                uint32_t val)
{
    int dirty_flags;
    dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
    if (!(dirty_flags & CODE_DIRTY_FLAG)) {
        /* page may hold translated code: invalidate it before writing */
#if !defined(CONFIG_USER_ONLY)
        tb_invalidate_phys_page_fast(ram_addr, 2);
        dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
#endif
    }
    stw_p(phys_ram_base + ram_addr, val);
#ifdef USE_KQEMU
    if (cpu_single_env->kqemu_enabled &&
        (dirty_flags & KQEMU_MODIFY_PAGE_MASK) != KQEMU_MODIFY_PAGE_MASK)
        kqemu_modify_page(cpu_single_env, ram_addr);
#endif
    dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
    phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] = dirty_flags;
    /* we remove the notdirty callback only if the code has been
       flushed */
    if (dirty_flags == 0xff)
        tlb_set_dirty(cpu_single_env, cpu_single_env->mem_io_vaddr);
}
25131ccde1cbSbellard 
25140f459d16Spbrook static void notdirty_mem_writel(void *opaque, target_phys_addr_t ram_addr,
25150f459d16Spbrook                                 uint32_t val)
25161ccde1cbSbellard {
25173a7d929eSbellard     int dirty_flags;
25183a7d929eSbellard     dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
25193a7d929eSbellard     if (!(dirty_flags & CODE_DIRTY_FLAG)) {
25203a7d929eSbellard #if !defined(CONFIG_USER_ONLY)
25213a7d929eSbellard         tb_invalidate_phys_page_fast(ram_addr, 4);
25223a7d929eSbellard         dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
25233a7d929eSbellard #endif
25243a7d929eSbellard     }
25250f459d16Spbrook     stl_p(phys_ram_base + ram_addr, val);
2526f32fc648Sbellard #ifdef USE_KQEMU
2527f32fc648Sbellard     if (cpu_single_env->kqemu_enabled &&
2528f32fc648Sbellard         (dirty_flags & KQEMU_MODIFY_PAGE_MASK) != KQEMU_MODIFY_PAGE_MASK)
2529f32fc648Sbellard         kqemu_modify_page(cpu_single_env, ram_addr);
2530f32fc648Sbellard #endif
2531f23db169Sbellard     dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
2532f23db169Sbellard     phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] = dirty_flags;
2533f23db169Sbellard     /* we remove the notdirty callback only if the code has been
2534f23db169Sbellard        flushed */
2535f23db169Sbellard     if (dirty_flags == 0xff)
25362e70f6efSpbrook         tlb_set_dirty(cpu_single_env, cpu_single_env->mem_io_vaddr);
25371ccde1cbSbellard }
25381ccde1cbSbellard 
/* Placeholder read table for slots (ROM, NOTDIRTY — see io_mem_init)
   whose reads never dispatch through the I/O tables; hitting one of
   these NULL entries would indicate a bug elsewhere.  */
static CPUReadMemoryFunc *error_mem_read[3] = {
    NULL, /* never used */
    NULL, /* never used */
    NULL, /* never used */
};
25443a7d929eSbellard 
/* Write dispatch table (byte/word/long) for RAM pages that still
   contain translated code; paired with error_mem_read on the read
   side because reads bypass this path.  */
static CPUWriteMemoryFunc *notdirty_mem_write[3] = {
    notdirty_mem_writeb,
    notdirty_mem_writew,
    notdirty_mem_writel,
};
25501ccde1cbSbellard 
/* Generate a debug exception if a watchpoint has been hit.
   offset:   page offset of the access (combined with mem_io_vaddr to
             rebuild the full virtual address).
   len_mask: mask derived from the access width (~0x0 byte, ~0x1 word,
             ~0x3 long) used to match watchpoint ranges.
   flags:    BP_MEM_READ and/or BP_MEM_WRITE, matched against each
             watchpoint's flags.  */
static void check_watchpoint(int offset, int len_mask, int flags)
{
    CPUState *env = cpu_single_env;
    target_ulong pc, cs_base;
    TranslationBlock *tb;
    target_ulong vaddr;
    CPUWatchpoint *wp;
    int cpu_flags;

    if (env->watchpoint_hit) {
        /* We re-entered the check after replacing the TB. Now raise
         * the debug interrupt so that it will trigger after the
         * current instruction. */
        cpu_interrupt(env, CPU_INTERRUPT_DEBUG);
        return;
    }
    /* Reconstruct the full guest virtual address of the access.  */
    vaddr = (env->mem_io_vaddr & TARGET_PAGE_MASK) + offset;
    TAILQ_FOREACH(wp, &env->watchpoints, entry) {
        /* Match either direction: access inside the watchpoint range,
           or watchpoint inside the access range.  */
        if ((vaddr == (wp->vaddr & len_mask) ||
             (vaddr & wp->len_mask) == wp->vaddr) && (wp->flags & flags)) {
            wp->flags |= BP_WATCHPOINT_HIT;
            if (!env->watchpoint_hit) {
                env->watchpoint_hit = wp;
                tb = tb_find_pc(env->mem_io_pc);
                if (!tb) {
                    cpu_abort(env, "check_watchpoint: could not find TB for "
                              "pc=%p", (void *)env->mem_io_pc);
                }
                /* Roll the CPU state back to the faulting instruction
                   and discard the current TB so it can be regenerated. */
                cpu_restore_state(tb, env, env->mem_io_pc, NULL);
                tb_phys_invalidate(tb, -1);
                if (wp->flags & BP_STOP_BEFORE_ACCESS) {
                    env->exception_index = EXCP_DEBUG;
                } else {
                    /* Regenerate a single-instruction TB so the access
                       completes before the debug exception is taken.  */
                    cpu_get_tb_cpu_state(env, &pc, &cs_base, &cpu_flags);
                    tb_gen_code(env, pc, cs_base, cpu_flags, 1);
                }
                /* Does not return: restarts execution from here.  */
                cpu_resume_from_signal(env, NULL);
            }
        } else {
            wp->flags &= ~BP_WATCHPOINT_HIT;
        }
    }
}
25950f459d16Spbrook 
25966658ffb8Spbrook /* Watchpoint access routines.  Watchpoints are inserted using TLB tricks,
25976658ffb8Spbrook    so these check for a hit then pass through to the normal out-of-line
25986658ffb8Spbrook    phys routines.  */
25996658ffb8Spbrook static uint32_t watch_mem_readb(void *opaque, target_phys_addr_t addr)
26006658ffb8Spbrook {
2601b4051334Saliguori     check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x0, BP_MEM_READ);
26026658ffb8Spbrook     return ldub_phys(addr);
26036658ffb8Spbrook }
26046658ffb8Spbrook 
26056658ffb8Spbrook static uint32_t watch_mem_readw(void *opaque, target_phys_addr_t addr)
26066658ffb8Spbrook {
2607b4051334Saliguori     check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x1, BP_MEM_READ);
26086658ffb8Spbrook     return lduw_phys(addr);
26096658ffb8Spbrook }
26106658ffb8Spbrook 
26116658ffb8Spbrook static uint32_t watch_mem_readl(void *opaque, target_phys_addr_t addr)
26126658ffb8Spbrook {
2613b4051334Saliguori     check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x3, BP_MEM_READ);
26146658ffb8Spbrook     return ldl_phys(addr);
26156658ffb8Spbrook }
26166658ffb8Spbrook 
26176658ffb8Spbrook static void watch_mem_writeb(void *opaque, target_phys_addr_t addr,
26186658ffb8Spbrook                              uint32_t val)
26196658ffb8Spbrook {
2620b4051334Saliguori     check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x0, BP_MEM_WRITE);
26216658ffb8Spbrook     stb_phys(addr, val);
26226658ffb8Spbrook }
26236658ffb8Spbrook 
26246658ffb8Spbrook static void watch_mem_writew(void *opaque, target_phys_addr_t addr,
26256658ffb8Spbrook                              uint32_t val)
26266658ffb8Spbrook {
2627b4051334Saliguori     check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x1, BP_MEM_WRITE);
26286658ffb8Spbrook     stw_phys(addr, val);
26296658ffb8Spbrook }
26306658ffb8Spbrook 
26316658ffb8Spbrook static void watch_mem_writel(void *opaque, target_phys_addr_t addr,
26326658ffb8Spbrook                              uint32_t val)
26336658ffb8Spbrook {
2634b4051334Saliguori     check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x3, BP_MEM_WRITE);
26356658ffb8Spbrook     stl_phys(addr, val);
26366658ffb8Spbrook }
26376658ffb8Spbrook 
/* Read dispatch table (byte/word/long) for pages carrying watchpoints. */
static CPUReadMemoryFunc *watch_mem_read[3] = {
    watch_mem_readb,
    watch_mem_readw,
    watch_mem_readl,
};
26436658ffb8Spbrook 
/* Write dispatch table (byte/word/long) for pages carrying watchpoints. */
static CPUWriteMemoryFunc *watch_mem_write[3] = {
    watch_mem_writeb,
    watch_mem_writew,
    watch_mem_writel,
};
26496658ffb8Spbrook 
/* Dispatch a read of width 2^len bytes (len: 0=byte, 1=word, 2=long)
   to the handler registered for the sub-page chunk containing addr.  */
static inline uint32_t subpage_readlen (subpage_t *mmio, target_phys_addr_t addr,
                                 unsigned int len)
{
    uint32_t ret;
    unsigned int idx;

    /* Index of the sub-page chunk within the page.  */
    idx = SUBPAGE_IDX(addr);
#if defined(DEBUG_SUBPAGE)
    printf("%s: subpage %p len %d addr " TARGET_FMT_plx " idx %d\n", __func__,
           mmio, len, addr, idx);
#endif
    /* Middle index 0 selects the read side of the opaque/region tables. */
    ret = (**mmio->mem_read[idx][len])(mmio->opaque[idx][0][len],
                                       addr + mmio->region_offset[idx][0][len]);

    return ret;
}
2666db7b5426Sblueswir1 
/* Dispatch a write of width 2^len bytes (len: 0=byte, 1=word, 2=long)
   to the handler registered for the sub-page chunk containing addr.  */
static inline void subpage_writelen (subpage_t *mmio, target_phys_addr_t addr,
                              uint32_t value, unsigned int len)
{
    unsigned int idx;

    /* Index of the sub-page chunk within the page.  */
    idx = SUBPAGE_IDX(addr);
#if defined(DEBUG_SUBPAGE)
    printf("%s: subpage %p len %d addr " TARGET_FMT_plx " idx %d value %08x\n", __func__,
           mmio, len, addr, idx, value);
#endif
    /* Middle index 1 selects the write side of the opaque/region tables. */
    (**mmio->mem_write[idx][len])(mmio->opaque[idx][1][len],
                                  addr + mmio->region_offset[idx][1][len],
                                  value);
}
2681db7b5426Sblueswir1 
2682db7b5426Sblueswir1 static uint32_t subpage_readb (void *opaque, target_phys_addr_t addr)
2683db7b5426Sblueswir1 {
2684db7b5426Sblueswir1 #if defined(DEBUG_SUBPAGE)
2685db7b5426Sblueswir1     printf("%s: addr " TARGET_FMT_plx "\n", __func__, addr);
2686db7b5426Sblueswir1 #endif
2687db7b5426Sblueswir1 
2688db7b5426Sblueswir1     return subpage_readlen(opaque, addr, 0);
2689db7b5426Sblueswir1 }
2690db7b5426Sblueswir1 
2691db7b5426Sblueswir1 static void subpage_writeb (void *opaque, target_phys_addr_t addr,
2692db7b5426Sblueswir1                             uint32_t value)
2693db7b5426Sblueswir1 {
2694db7b5426Sblueswir1 #if defined(DEBUG_SUBPAGE)
2695db7b5426Sblueswir1     printf("%s: addr " TARGET_FMT_plx " val %08x\n", __func__, addr, value);
2696db7b5426Sblueswir1 #endif
2697db7b5426Sblueswir1     subpage_writelen(opaque, addr, value, 0);
2698db7b5426Sblueswir1 }
2699db7b5426Sblueswir1 
2700db7b5426Sblueswir1 static uint32_t subpage_readw (void *opaque, target_phys_addr_t addr)
2701db7b5426Sblueswir1 {
2702db7b5426Sblueswir1 #if defined(DEBUG_SUBPAGE)
2703db7b5426Sblueswir1     printf("%s: addr " TARGET_FMT_plx "\n", __func__, addr);
2704db7b5426Sblueswir1 #endif
2705db7b5426Sblueswir1 
2706db7b5426Sblueswir1     return subpage_readlen(opaque, addr, 1);
2707db7b5426Sblueswir1 }
2708db7b5426Sblueswir1 
2709db7b5426Sblueswir1 static void subpage_writew (void *opaque, target_phys_addr_t addr,
2710db7b5426Sblueswir1                             uint32_t value)
2711db7b5426Sblueswir1 {
2712db7b5426Sblueswir1 #if defined(DEBUG_SUBPAGE)
2713db7b5426Sblueswir1     printf("%s: addr " TARGET_FMT_plx " val %08x\n", __func__, addr, value);
2714db7b5426Sblueswir1 #endif
2715db7b5426Sblueswir1     subpage_writelen(opaque, addr, value, 1);
2716db7b5426Sblueswir1 }
2717db7b5426Sblueswir1 
2718db7b5426Sblueswir1 static uint32_t subpage_readl (void *opaque, target_phys_addr_t addr)
2719db7b5426Sblueswir1 {
2720db7b5426Sblueswir1 #if defined(DEBUG_SUBPAGE)
2721db7b5426Sblueswir1     printf("%s: addr " TARGET_FMT_plx "\n", __func__, addr);
2722db7b5426Sblueswir1 #endif
2723db7b5426Sblueswir1 
2724db7b5426Sblueswir1     return subpage_readlen(opaque, addr, 2);
2725db7b5426Sblueswir1 }
2726db7b5426Sblueswir1 
2727db7b5426Sblueswir1 static void subpage_writel (void *opaque,
2728db7b5426Sblueswir1                          target_phys_addr_t addr, uint32_t value)
2729db7b5426Sblueswir1 {
2730db7b5426Sblueswir1 #if defined(DEBUG_SUBPAGE)
2731db7b5426Sblueswir1     printf("%s: addr " TARGET_FMT_plx " val %08x\n", __func__, addr, value);
2732db7b5426Sblueswir1 #endif
2733db7b5426Sblueswir1     subpage_writelen(opaque, addr, value, 2);
2734db7b5426Sblueswir1 }
2735db7b5426Sblueswir1 
/* Read dispatch table (byte/word/long) for subpage-split pages.  */
static CPUReadMemoryFunc *subpage_read[] = {
    &subpage_readb,
    &subpage_readw,
    &subpage_readl,
};
2741db7b5426Sblueswir1 
/* Write dispatch table (byte/word/long) for subpage-split pages.  */
static CPUWriteMemoryFunc *subpage_write[] = {
    &subpage_writeb,
    &subpage_writew,
    &subpage_writel,
};
2747db7b5426Sblueswir1 
2748db7b5426Sblueswir1 static int subpage_register (subpage_t *mmio, uint32_t start, uint32_t end,
27498da3ff18Spbrook                              ram_addr_t memory, ram_addr_t region_offset)
2750db7b5426Sblueswir1 {
2751db7b5426Sblueswir1     int idx, eidx;
27524254fab8Sblueswir1     unsigned int i;
2753db7b5426Sblueswir1 
2754db7b5426Sblueswir1     if (start >= TARGET_PAGE_SIZE || end >= TARGET_PAGE_SIZE)
2755db7b5426Sblueswir1         return -1;
2756db7b5426Sblueswir1     idx = SUBPAGE_IDX(start);
2757db7b5426Sblueswir1     eidx = SUBPAGE_IDX(end);
2758db7b5426Sblueswir1 #if defined(DEBUG_SUBPAGE)
2759db7b5426Sblueswir1     printf("%s: %p start %08x end %08x idx %08x eidx %08x mem %d\n", __func__,
2760db7b5426Sblueswir1            mmio, start, end, idx, eidx, memory);
2761db7b5426Sblueswir1 #endif
2762db7b5426Sblueswir1     memory >>= IO_MEM_SHIFT;
2763db7b5426Sblueswir1     for (; idx <= eidx; idx++) {
27644254fab8Sblueswir1         for (i = 0; i < 4; i++) {
27653ee89922Sblueswir1             if (io_mem_read[memory][i]) {
27663ee89922Sblueswir1                 mmio->mem_read[idx][i] = &io_mem_read[memory][i];
27673ee89922Sblueswir1                 mmio->opaque[idx][0][i] = io_mem_opaque[memory];
27688da3ff18Spbrook                 mmio->region_offset[idx][0][i] = region_offset;
27694254fab8Sblueswir1             }
27703ee89922Sblueswir1             if (io_mem_write[memory][i]) {
27713ee89922Sblueswir1                 mmio->mem_write[idx][i] = &io_mem_write[memory][i];
27723ee89922Sblueswir1                 mmio->opaque[idx][1][i] = io_mem_opaque[memory];
27738da3ff18Spbrook                 mmio->region_offset[idx][1][i] = region_offset;
27743ee89922Sblueswir1             }
27753ee89922Sblueswir1         }
2776db7b5426Sblueswir1     }
2777db7b5426Sblueswir1 
2778db7b5426Sblueswir1     return 0;
2779db7b5426Sblueswir1 }
2780db7b5426Sblueswir1 
278100f82b8aSaurel32 static void *subpage_init (target_phys_addr_t base, ram_addr_t *phys,
27828da3ff18Spbrook                            ram_addr_t orig_memory, ram_addr_t region_offset)
2783db7b5426Sblueswir1 {
2784db7b5426Sblueswir1     subpage_t *mmio;
2785db7b5426Sblueswir1     int subpage_memory;
2786db7b5426Sblueswir1 
2787db7b5426Sblueswir1     mmio = qemu_mallocz(sizeof(subpage_t));
27881eec614bSaliguori 
2789db7b5426Sblueswir1     mmio->base = base;
2790db7b5426Sblueswir1     subpage_memory = cpu_register_io_memory(0, subpage_read, subpage_write, mmio);
2791db7b5426Sblueswir1 #if defined(DEBUG_SUBPAGE)
2792db7b5426Sblueswir1     printf("%s: %p base " TARGET_FMT_plx " len %08x %d\n", __func__,
2793db7b5426Sblueswir1            mmio, base, TARGET_PAGE_SIZE, subpage_memory);
2794db7b5426Sblueswir1 #endif
2795db7b5426Sblueswir1     *phys = subpage_memory | IO_MEM_SUBPAGE;
27968da3ff18Spbrook     subpage_register(mmio, 0, TARGET_PAGE_SIZE - 1, orig_memory,
27978da3ff18Spbrook                          region_offset);
2798db7b5426Sblueswir1 
2799db7b5426Sblueswir1     return mmio;
2800db7b5426Sblueswir1 }
2801db7b5426Sblueswir1 
280288715657Saliguori static int get_free_io_mem_idx(void)
280388715657Saliguori {
280488715657Saliguori     int i;
280588715657Saliguori 
280688715657Saliguori     for (i = 0; i<IO_MEM_NB_ENTRIES; i++)
280788715657Saliguori         if (!io_mem_used[i]) {
280888715657Saliguori             io_mem_used[i] = 1;
280988715657Saliguori             return i;
281088715657Saliguori         }
281188715657Saliguori 
281288715657Saliguori     return -1;
281388715657Saliguori }
281488715657Saliguori 
/* One-time setup of the fixed I/O memory slots (ROM, unassigned,
   notdirty, watchpoint) and of the physical-RAM dirty bitmap.  */
static void io_mem_init(void)
{
    int i;

    cpu_register_io_memory(IO_MEM_ROM >> IO_MEM_SHIFT, error_mem_read, unassigned_mem_write, NULL);
    cpu_register_io_memory(IO_MEM_UNASSIGNED >> IO_MEM_SHIFT, unassigned_mem_read, unassigned_mem_write, NULL);
    cpu_register_io_memory(IO_MEM_NOTDIRTY >> IO_MEM_SHIFT, error_mem_read, notdirty_mem_write, NULL);
    /* NOTE(review): presumably reserves the low table entries for the
       fixed IO_MEM_* slots so get_free_io_mem_idx() skips them --
       confirm that 5 matches the number of reserved slots.  */
    for (i=0; i<5; i++)
        io_mem_used[i] = 1;

    io_mem_watch = cpu_register_io_memory(0, watch_mem_read,
                                          watch_mem_write, NULL);
    /* alloc dirty bits array: one flag byte per target page,
       initially all-dirty */
    phys_ram_dirty = qemu_vmalloc(phys_ram_size >> TARGET_PAGE_BITS);
    memset(phys_ram_dirty, 0xff, phys_ram_size >> TARGET_PAGE_BITS);
}
283133417e70Sbellard 
/* mem_read and mem_write are arrays of functions containing the
   function to access byte (index 0), word (index 1) and dword (index
   2). Functions can be omitted with a NULL function pointer. The
   registered functions may be modified dynamically later.
   If io_index is positive, the corresponding io zone is modified.
   If it is zero or negative, a new io zone is allocated. The return
   value can be used with cpu_register_physical_memory(). (-1) is
   returned on error. */
284033417e70Sbellard int cpu_register_io_memory(int io_index,
284133417e70Sbellard                            CPUReadMemoryFunc **mem_read,
2842a4193c8aSbellard                            CPUWriteMemoryFunc **mem_write,
2843a4193c8aSbellard                            void *opaque)
284433417e70Sbellard {
28454254fab8Sblueswir1     int i, subwidth = 0;
284633417e70Sbellard 
284733417e70Sbellard     if (io_index <= 0) {
284888715657Saliguori         io_index = get_free_io_mem_idx();
284988715657Saliguori         if (io_index == -1)
285088715657Saliguori             return io_index;
285133417e70Sbellard     } else {
285233417e70Sbellard         if (io_index >= IO_MEM_NB_ENTRIES)
285333417e70Sbellard             return -1;
285433417e70Sbellard     }
285533417e70Sbellard 
285633417e70Sbellard     for(i = 0;i < 3; i++) {
28574254fab8Sblueswir1         if (!mem_read[i] || !mem_write[i])
28584254fab8Sblueswir1             subwidth = IO_MEM_SUBWIDTH;
285933417e70Sbellard         io_mem_read[io_index][i] = mem_read[i];
286033417e70Sbellard         io_mem_write[io_index][i] = mem_write[i];
286133417e70Sbellard     }
2862a4193c8aSbellard     io_mem_opaque[io_index] = opaque;
28634254fab8Sblueswir1     return (io_index << IO_MEM_SHIFT) | subwidth;
286433417e70Sbellard }
286561382a50Sbellard 
286688715657Saliguori void cpu_unregister_io_memory(int io_table_address)
286788715657Saliguori {
286888715657Saliguori     int i;
286988715657Saliguori     int io_index = io_table_address >> IO_MEM_SHIFT;
287088715657Saliguori 
287188715657Saliguori     for (i=0;i < 3; i++) {
287288715657Saliguori         io_mem_read[io_index][i] = unassigned_mem_read[i];
287388715657Saliguori         io_mem_write[io_index][i] = unassigned_mem_write[i];
287488715657Saliguori     }
287588715657Saliguori     io_mem_opaque[io_index] = NULL;
287688715657Saliguori     io_mem_used[io_index] = 0;
287788715657Saliguori }
287888715657Saliguori 
28798926b517Sbellard CPUWriteMemoryFunc **cpu_get_io_memory_write(int io_index)
28808926b517Sbellard {
28818926b517Sbellard     return io_mem_write[io_index >> IO_MEM_SHIFT];
28828926b517Sbellard }
28838926b517Sbellard 
28848926b517Sbellard CPUReadMemoryFunc **cpu_get_io_memory_read(int io_index)
28858926b517Sbellard {
28868926b517Sbellard     return io_mem_read[io_index >> IO_MEM_SHIFT];
28878926b517Sbellard }
28888926b517Sbellard 
2889e2eef170Spbrook #endif /* !defined(CONFIG_USER_ONLY) */
2890e2eef170Spbrook 
289113eb76e0Sbellard /* physical memory access (slow version, mainly for debug) */
289213eb76e0Sbellard #if defined(CONFIG_USER_ONLY)
/* User-mode variant: copy 'len' bytes between 'buf' and guest memory
   at 'addr', page by page, honouring the page protection flags.
   Silently stops at the first invalid or protection-violating page
   (see the FIXME notes below).  */
void cpu_physical_memory_rw(target_phys_addr_t addr, uint8_t *buf,
                            int len, int is_write)
{
    int l, flags;
    target_ulong page;
    void * p;

    while (len > 0) {
        page = addr & TARGET_PAGE_MASK;
        /* Bytes remaining in the current page.  */
        l = (page + TARGET_PAGE_SIZE) - addr;
        if (l > len)
            l = len;
        flags = page_get_flags(page);
        if (!(flags & PAGE_VALID))
            return;
        if (is_write) {
            if (!(flags & PAGE_WRITE))
                return;
            /* XXX: this code should not depend on lock_user */
            if (!(p = lock_user(VERIFY_WRITE, addr, l, 0)))
                /* FIXME - should this return an error rather than just fail? */
                return;
            memcpy(p, buf, l);
            unlock_user(p, addr, l);
        } else {
            if (!(flags & PAGE_READ))
                return;
            /* XXX: this code should not depend on lock_user */
            if (!(p = lock_user(VERIFY_READ, addr, l, 1)))
                /* FIXME - should this return an error rather than just fail? */
                return;
            memcpy(buf, p, l);
            /* Read side: nothing was written back to guest memory.  */
            unlock_user(p, addr, 0);
        }
        len -= l;
        buf += l;
        addr += l;
    }
}
29328df1cd07Sbellard 
293313eb76e0Sbellard #else
/* System-mode variant: copy 'len' bytes between 'buf' and guest
   physical memory at 'addr'.  RAM pages are accessed with memcpy
   (invalidating translated code as needed); MMIO pages are split
   into the widest naturally-aligned 1/2/4-byte device accesses.  */
void cpu_physical_memory_rw(target_phys_addr_t addr, uint8_t *buf,
                            int len, int is_write)
{
    int l, io_index;
    uint8_t *ptr;
    uint32_t val;
    target_phys_addr_t page;
    unsigned long pd;
    PhysPageDesc *p;

    while (len > 0) {
        page = addr & TARGET_PAGE_MASK;
        /* Bytes remaining in the current page.  */
        l = (page + TARGET_PAGE_SIZE) - addr;
        if (l > len)
            l = len;
        p = phys_page_find(page >> TARGET_PAGE_BITS);
        if (!p) {
            pd = IO_MEM_UNASSIGNED;
        } else {
            pd = p->phys_offset;
        }

        if (is_write) {
            if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
                /* MMIO write path.  */
                target_phys_addr_t addr1 = addr;
                io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
                /* Rebase the address into the device's own region.  */
                if (p)
                    addr1 = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
                /* XXX: could force cpu_single_env to NULL to avoid
                   potential bugs */
                if (l >= 4 && ((addr1 & 3) == 0)) {
                    /* 32 bit write access */
                    val = ldl_p(buf);
                    io_mem_write[io_index][2](io_mem_opaque[io_index], addr1, val);
                    l = 4;
                } else if (l >= 2 && ((addr1 & 1) == 0)) {
                    /* 16 bit write access */
                    val = lduw_p(buf);
                    io_mem_write[io_index][1](io_mem_opaque[io_index], addr1, val);
                    l = 2;
                } else {
                    /* 8 bit write access */
                    val = ldub_p(buf);
                    io_mem_write[io_index][0](io_mem_opaque[io_index], addr1, val);
                    l = 1;
                }
            } else {
                unsigned long addr1;
                addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
                /* RAM case */
                ptr = phys_ram_base + addr1;
                memcpy(ptr, buf, l);
                if (!cpu_physical_memory_is_dirty(addr1)) {
                    /* invalidate code */
                    tb_invalidate_phys_page_range(addr1, addr1 + l, 0);
                    /* set dirty bit */
                    phys_ram_dirty[addr1 >> TARGET_PAGE_BITS] |=
                        (0xff & ~CODE_DIRTY_FLAG);
                }
            }
        } else {
            if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
                !(pd & IO_MEM_ROMD)) {
                target_phys_addr_t addr1 = addr;
                /* I/O case */
                io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
                /* Rebase the address into the device's own region.  */
                if (p)
                    addr1 = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
                if (l >= 4 && ((addr1 & 3) == 0)) {
                    /* 32 bit read access */
                    val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr1);
                    stl_p(buf, val);
                    l = 4;
                } else if (l >= 2 && ((addr1 & 1) == 0)) {
                    /* 16 bit read access */
                    val = io_mem_read[io_index][1](io_mem_opaque[io_index], addr1);
                    stw_p(buf, val);
                    l = 2;
                } else {
                    /* 8 bit read access */
                    val = io_mem_read[io_index][0](io_mem_opaque[io_index], addr1);
                    stb_p(buf, val);
                    l = 1;
                }
            } else {
                /* RAM case */
                ptr = phys_ram_base + (pd & TARGET_PAGE_MASK) +
                    (addr & ~TARGET_PAGE_MASK);
                memcpy(buf, ptr, l);
            }
        }
        len -= l;
        buf += l;
        addr += l;
    }
}
30308df1cd07Sbellard 
/* used for ROM loading : can write in RAM and ROM */
/* Copy 'len' bytes from 'buf' into guest physical memory at 'addr',
   page by page.  Unlike cpu_physical_memory_rw() this also writes
   into ROM/ROMD pages, and silently skips anything else (MMIO).
   No dirty tracking or code invalidation is performed here.  */
void cpu_physical_memory_write_rom(target_phys_addr_t addr,
                                   const uint8_t *buf, int len)
{
    int l;
    uint8_t *ptr;
    target_phys_addr_t page;
    unsigned long pd;
    PhysPageDesc *p;

    while (len > 0) {
        page = addr & TARGET_PAGE_MASK;
        /* Bytes remaining in the current page.  */
        l = (page + TARGET_PAGE_SIZE) - addr;
        if (l > len)
            l = len;
        p = phys_page_find(page >> TARGET_PAGE_BITS);
        if (!p) {
            pd = IO_MEM_UNASSIGNED;
        } else {
            pd = p->phys_offset;
        }

        if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM &&
            (pd & ~TARGET_PAGE_MASK) != IO_MEM_ROM &&
            !(pd & IO_MEM_ROMD)) {
            /* do nothing */
        } else {
            unsigned long addr1;
            addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
            /* ROM/RAM case */
            ptr = phys_ram_base + addr1;
            memcpy(ptr, buf, l);
        }
        len -= l;
        buf += l;
        addr += l;
    }
}
3069d0ecd2aaSbellard 
/* Staging buffer used by cpu_physical_memory_map() when the target
   region is not plain RAM; 'buffer' non-NULL means it is in use.  */
typedef struct {
    void *buffer;               /* host allocation, NULL when free */
    target_phys_addr_t addr;    /* guest physical address it shadows */
    target_phys_addr_t len;     /* length of the mapped span */
} BounceBuffer;

/* Single global instance: only one bounce mapping can exist at a time. */
static BounceBuffer bounce;
30776d16c2f8Saliguori 
/* A client waiting to be notified when map resources (the bounce
   buffer) become available again; see cpu_register_map_client().  */
typedef struct MapClient {
    void *opaque;                   /* passed back to the callback */
    void (*callback)(void *opaque); /* invoked by cpu_notify_map_clients() */
    LIST_ENTRY(MapClient) link;
} MapClient;

/* All currently registered map clients.  */
static LIST_HEAD(map_client_list, MapClient) map_client_list
    = LIST_HEAD_INITIALIZER(map_client_list);
3086ba223c29Saliguori 
3087ba223c29Saliguori void *cpu_register_map_client(void *opaque, void (*callback)(void *opaque))
3088ba223c29Saliguori {
3089ba223c29Saliguori     MapClient *client = qemu_malloc(sizeof(*client));
3090ba223c29Saliguori 
3091ba223c29Saliguori     client->opaque = opaque;
3092ba223c29Saliguori     client->callback = callback;
3093ba223c29Saliguori     LIST_INSERT_HEAD(&map_client_list, client, link);
3094ba223c29Saliguori     return client;
3095ba223c29Saliguori }
3096ba223c29Saliguori 
3097ba223c29Saliguori void cpu_unregister_map_client(void *_client)
3098ba223c29Saliguori {
3099ba223c29Saliguori     MapClient *client = (MapClient *)_client;
3100ba223c29Saliguori 
3101ba223c29Saliguori     LIST_REMOVE(client, link);
3102ba223c29Saliguori }
3103ba223c29Saliguori 
/* Wake everybody waiting for cpu_physical_memory_map() resources:
 * called from cpu_physical_memory_unmap() once the bounce buffer has
 * been released.  Each registered callback is invoked once and its
 * entry is then unlinked, so the loop drains the whole list.
 *
 * NOTE(review): the entry is unlinked *after* its callback runs; if a
 * callback calls cpu_unregister_map_client() on its own handle (or
 * otherwise unlinks entries), LIST_REMOVE ends up applied twice to the
 * same entry and the list is corrupted.  Confirm no callback does
 * this, or unlink the entry before invoking the callback. */
static void cpu_notify_map_clients(void)
{
    MapClient *client;

    while (!LIST_EMPTY(&map_client_list)) {
        client = LIST_FIRST(&map_client_list);
        client->callback(client->opaque);
        LIST_REMOVE(client, link);
    }
}
3114ba223c29Saliguori 
/* Map a physical memory region into a host virtual address.
 * May map a subset of the requested range, given by and returned in *plen.
 * May return NULL if resources needed to perform the mapping are exhausted.
 * Use only for reads OR writes - not for read-modify-write operations.
 * Use cpu_register_map_client() to know when retrying the map operation is
 * likely to succeed.
 *
 * Pages backed by plain RAM are mapped by pointing directly into
 * phys_ram_base; anything else goes through the single global bounce
 * buffer, which limits such mappings to at most one page and to one
 * caller at a time.
 */
void *cpu_physical_memory_map(target_phys_addr_t addr,
                              target_phys_addr_t *plen,
                              int is_write)
{
    target_phys_addr_t len = *plen;
    target_phys_addr_t done = 0;     /* bytes successfully mapped so far */
    int l;                           /* chunk size within current page */
    uint8_t *ret = NULL;             /* host address of start of mapping */
    uint8_t *ptr;                    /* host address of current chunk */
    target_phys_addr_t page;
    unsigned long pd;                /* phys_offset of the current page */
    PhysPageDesc *p;
    unsigned long addr1;

    while (len > 0) {
        /* Clamp the chunk to the remainder of the current target page. */
        page = addr & TARGET_PAGE_MASK;
        l = (page + TARGET_PAGE_SIZE) - addr;
        if (l > len)
            l = len;
        p = phys_page_find(page >> TARGET_PAGE_BITS);
        if (!p) {
            pd = IO_MEM_UNASSIGNED;
        } else {
            pd = p->phys_offset;
        }

        if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
            /* Not plain RAM: must bounce.  The bounce buffer cannot be
             * appended to an already-started direct mapping, and only
             * one bounce mapping may exist at a time — stop here in
             * either case and return what was mapped so far. */
            if (done || bounce.buffer) {
                break;
            }
            bounce.buffer = qemu_memalign(TARGET_PAGE_SIZE, TARGET_PAGE_SIZE);
            bounce.addr = addr;
            bounce.len = l;
            if (!is_write) {
                /* Read mapping: pre-fill the buffer from guest memory. */
                cpu_physical_memory_rw(addr, bounce.buffer, l, 0);
            }
            ptr = bounce.buffer;
        } else {
            /* RAM: compute the host pointer directly. */
            addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
            ptr = phys_ram_base + addr1;
        }
        if (!done) {
            ret = ptr;
        } else if (ret + done != ptr) {
            /* Host addresses stopped being contiguous: the caller gets
             * only the contiguous prefix. */
            break;
        }

        len -= l;
        addr += l;
        done += l;
    }
    *plen = done;
    return ret;
}
31766d16c2f8Saliguori 
/* Unmaps a memory region previously mapped by cpu_physical_memory_map().
 * Will also mark the memory as dirty if is_write == 1.  access_len gives
 * the amount of memory that was actually read or written by the caller.
 */
void cpu_physical_memory_unmap(void *buffer, target_phys_addr_t len,
                               int is_write, target_phys_addr_t access_len)
{
    if (buffer != bounce.buffer) {
        /* Direct RAM mapping: nothing to copy back, but for writes the
         * dirty bitmap must be updated and any translated code in the
         * touched pages invalidated. */
        if (is_write) {
            unsigned long addr1 = (uint8_t *)buffer - phys_ram_base;
            while (access_len) {
                unsigned l;
                l = TARGET_PAGE_SIZE;
                if (l > access_len)
                    l = access_len;
                if (!cpu_physical_memory_is_dirty(addr1)) {
                    /* invalidate code */
                    tb_invalidate_phys_page_range(addr1, addr1 + l, 0);
                    /* set dirty bit */
                    phys_ram_dirty[addr1 >> TARGET_PAGE_BITS] |=
                        (0xff & ~CODE_DIRTY_FLAG);
                }
                addr1 += l;
                access_len -= l;
            }
        }
        return;
    }
    /* Bounce-buffer mapping: flush written data back to the guest, then
     * release the single global buffer and wake any waiting clients. */
    if (is_write) {
        cpu_physical_memory_write(bounce.addr, bounce.buffer, access_len);
    }
    qemu_free(bounce.buffer);
    bounce.buffer = NULL;
    cpu_notify_map_clients();
}
3212d0ecd2aaSbellard 
32138df1cd07Sbellard /* warning: addr must be aligned */
32148df1cd07Sbellard uint32_t ldl_phys(target_phys_addr_t addr)
32158df1cd07Sbellard {
32168df1cd07Sbellard     int io_index;
32178df1cd07Sbellard     uint8_t *ptr;
32188df1cd07Sbellard     uint32_t val;
32198df1cd07Sbellard     unsigned long pd;
32208df1cd07Sbellard     PhysPageDesc *p;
32218df1cd07Sbellard 
32228df1cd07Sbellard     p = phys_page_find(addr >> TARGET_PAGE_BITS);
32238df1cd07Sbellard     if (!p) {
32248df1cd07Sbellard         pd = IO_MEM_UNASSIGNED;
32258df1cd07Sbellard     } else {
32268df1cd07Sbellard         pd = p->phys_offset;
32278df1cd07Sbellard     }
32288df1cd07Sbellard 
32292a4188a3Sbellard     if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
32302a4188a3Sbellard         !(pd & IO_MEM_ROMD)) {
32318df1cd07Sbellard         /* I/O case */
32328df1cd07Sbellard         io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
32338da3ff18Spbrook         if (p)
32348da3ff18Spbrook             addr = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
32358df1cd07Sbellard         val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr);
32368df1cd07Sbellard     } else {
32378df1cd07Sbellard         /* RAM case */
32388df1cd07Sbellard         ptr = phys_ram_base + (pd & TARGET_PAGE_MASK) +
32398df1cd07Sbellard             (addr & ~TARGET_PAGE_MASK);
32408df1cd07Sbellard         val = ldl_p(ptr);
32418df1cd07Sbellard     }
32428df1cd07Sbellard     return val;
32438df1cd07Sbellard }
32448df1cd07Sbellard 
/* Load a 64-bit value from guest physical memory.
 * warning: addr must be aligned */
uint64_t ldq_phys(target_phys_addr_t addr)
{
    int io_index;
    uint8_t *ptr;
    uint64_t val;
    unsigned long pd;
    PhysPageDesc *p;

    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    if (!p) {
        pd = IO_MEM_UNASSIGNED;
    } else {
        pd = p->phys_offset;
    }

    if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
        !(pd & IO_MEM_ROMD)) {
        /* I/O case: the io_mem tables have no 64-bit accessor, so issue
         * two 32-bit reads and combine them in guest byte order. */
        io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
        if (p)
            addr = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
#ifdef TARGET_WORDS_BIGENDIAN
        val = (uint64_t)io_mem_read[io_index][2](io_mem_opaque[io_index], addr) << 32;
        val |= io_mem_read[io_index][2](io_mem_opaque[io_index], addr + 4);
#else
        val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr);
        val |= (uint64_t)io_mem_read[io_index][2](io_mem_opaque[io_index], addr + 4) << 32;
#endif
    } else {
        /* RAM case: read straight from host memory. */
        ptr = phys_ram_base + (pd & TARGET_PAGE_MASK) +
            (addr & ~TARGET_PAGE_MASK);
        val = ldq_p(ptr);
    }
    return val;
}
328284b7b8e7Sbellard 
3283aab33094Sbellard /* XXX: optimize */
3284aab33094Sbellard uint32_t ldub_phys(target_phys_addr_t addr)
3285aab33094Sbellard {
3286aab33094Sbellard     uint8_t val;
3287aab33094Sbellard     cpu_physical_memory_read(addr, &val, 1);
3288aab33094Sbellard     return val;
3289aab33094Sbellard }
3290aab33094Sbellard 
3291aab33094Sbellard /* XXX: optimize */
3292aab33094Sbellard uint32_t lduw_phys(target_phys_addr_t addr)
3293aab33094Sbellard {
3294aab33094Sbellard     uint16_t val;
3295aab33094Sbellard     cpu_physical_memory_read(addr, (uint8_t *)&val, 2);
3296aab33094Sbellard     return tswap16(val);
3297aab33094Sbellard }
3298aab33094Sbellard 
/* warning: addr must be aligned. The ram page is not masked as dirty
   and the code inside is not invalidated. It is useful if the dirty
   bits are used to track modified PTEs */
void stl_phys_notdirty(target_phys_addr_t addr, uint32_t val)
{
    int io_index;
    uint8_t *ptr;
    unsigned long pd;
    PhysPageDesc *p;

    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    if (!p) {
        pd = IO_MEM_UNASSIGNED;
    } else {
        pd = p->phys_offset;
    }

    if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
        /* Not RAM: forward to the region's 32-bit write handler. */
        io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
        if (p)
            addr = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
        io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
    } else {
        unsigned long addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
        ptr = phys_ram_base + addr1;
        stl_p(ptr, val);

        /* Exception to the "notdirty" contract: during live migration
         * the dirty log must still see this store, or the destination
         * would receive a stale page. */
        if (unlikely(in_migration)) {
            if (!cpu_physical_memory_is_dirty(addr1)) {
                /* invalidate code */
                tb_invalidate_phys_page_range(addr1, addr1 + 4, 0);
                /* set dirty bit */
                phys_ram_dirty[addr1 >> TARGET_PAGE_BITS] |=
                    (0xff & ~CODE_DIRTY_FLAG);
            }
        }
    }
}
33378df1cd07Sbellard 
/* Store a 64-bit value without dirty marking or code invalidation; the
 * 64-bit counterpart of stl_phys_notdirty().
 * warning: addr must be aligned.
 * NOTE(review): unlike stl_phys_notdirty(), the RAM path here has no
 * in_migration dirty-log update — confirm whether live migration can
 * miss 64-bit stores made through this helper. */
void stq_phys_notdirty(target_phys_addr_t addr, uint64_t val)
{
    int io_index;
    uint8_t *ptr;
    unsigned long pd;
    PhysPageDesc *p;

    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    if (!p) {
        pd = IO_MEM_UNASSIGNED;
    } else {
        pd = p->phys_offset;
    }

    if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
        /* Not RAM: no 64-bit I/O accessor exists, so issue two 32-bit
         * writes in guest byte order. */
        io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
        if (p)
            addr = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
#ifdef TARGET_WORDS_BIGENDIAN
        io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val >> 32);
        io_mem_write[io_index][2](io_mem_opaque[io_index], addr + 4, val);
#else
        io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
        io_mem_write[io_index][2](io_mem_opaque[io_index], addr + 4, val >> 32);
#endif
    } else {
        /* RAM: write straight to host memory. */
        ptr = phys_ram_base + (pd & TARGET_PAGE_MASK) +
            (addr & ~TARGET_PAGE_MASK);
        stq_p(ptr, val);
    }
}
3369bc98a7efSj_mayer 
33708df1cd07Sbellard /* warning: addr must be aligned */
33718df1cd07Sbellard void stl_phys(target_phys_addr_t addr, uint32_t val)
33728df1cd07Sbellard {
33738df1cd07Sbellard     int io_index;
33748df1cd07Sbellard     uint8_t *ptr;
33758df1cd07Sbellard     unsigned long pd;
33768df1cd07Sbellard     PhysPageDesc *p;
33778df1cd07Sbellard 
33788df1cd07Sbellard     p = phys_page_find(addr >> TARGET_PAGE_BITS);
33798df1cd07Sbellard     if (!p) {
33808df1cd07Sbellard         pd = IO_MEM_UNASSIGNED;
33818df1cd07Sbellard     } else {
33828df1cd07Sbellard         pd = p->phys_offset;
33838df1cd07Sbellard     }
33848df1cd07Sbellard 
33853a7d929eSbellard     if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
33868df1cd07Sbellard         io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
33878da3ff18Spbrook         if (p)
33888da3ff18Spbrook             addr = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
33898df1cd07Sbellard         io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
33908df1cd07Sbellard     } else {
33918df1cd07Sbellard         unsigned long addr1;
33928df1cd07Sbellard         addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
33938df1cd07Sbellard         /* RAM case */
33948df1cd07Sbellard         ptr = phys_ram_base + addr1;
33958df1cd07Sbellard         stl_p(ptr, val);
33963a7d929eSbellard         if (!cpu_physical_memory_is_dirty(addr1)) {
33978df1cd07Sbellard             /* invalidate code */
33988df1cd07Sbellard             tb_invalidate_phys_page_range(addr1, addr1 + 4, 0);
33998df1cd07Sbellard             /* set dirty bit */
3400f23db169Sbellard             phys_ram_dirty[addr1 >> TARGET_PAGE_BITS] |=
3401f23db169Sbellard                 (0xff & ~CODE_DIRTY_FLAG);
34028df1cd07Sbellard         }
34038df1cd07Sbellard     }
34043a7d929eSbellard }
34058df1cd07Sbellard 
3406aab33094Sbellard /* XXX: optimize */
3407aab33094Sbellard void stb_phys(target_phys_addr_t addr, uint32_t val)
3408aab33094Sbellard {
3409aab33094Sbellard     uint8_t v = val;
3410aab33094Sbellard     cpu_physical_memory_write(addr, &v, 1);
3411aab33094Sbellard }
3412aab33094Sbellard 
3413aab33094Sbellard /* XXX: optimize */
3414aab33094Sbellard void stw_phys(target_phys_addr_t addr, uint32_t val)
3415aab33094Sbellard {
3416aab33094Sbellard     uint16_t v = tswap16(val);
3417aab33094Sbellard     cpu_physical_memory_write(addr, (const uint8_t *)&v, 2);
3418aab33094Sbellard }
3419aab33094Sbellard 
3420aab33094Sbellard /* XXX: optimize */
3421aab33094Sbellard void stq_phys(target_phys_addr_t addr, uint64_t val)
3422aab33094Sbellard {
3423aab33094Sbellard     val = tswap64(val);
3424aab33094Sbellard     cpu_physical_memory_write(addr, (const uint8_t *)&val, 8);
3425aab33094Sbellard }
3426aab33094Sbellard 
342713eb76e0Sbellard #endif
342813eb76e0Sbellard 
342913eb76e0Sbellard /* virtual memory access for debug */
3430b448f2f3Sbellard int cpu_memory_rw_debug(CPUState *env, target_ulong addr,
3431b448f2f3Sbellard                         uint8_t *buf, int len, int is_write)
343213eb76e0Sbellard {
343313eb76e0Sbellard     int l;
34349b3c35e0Sj_mayer     target_phys_addr_t phys_addr;
34359b3c35e0Sj_mayer     target_ulong page;
343613eb76e0Sbellard 
343713eb76e0Sbellard     while (len > 0) {
343813eb76e0Sbellard         page = addr & TARGET_PAGE_MASK;
343913eb76e0Sbellard         phys_addr = cpu_get_phys_page_debug(env, page);
344013eb76e0Sbellard         /* if no physical page mapped, return an error */
344113eb76e0Sbellard         if (phys_addr == -1)
344213eb76e0Sbellard             return -1;
344313eb76e0Sbellard         l = (page + TARGET_PAGE_SIZE) - addr;
344413eb76e0Sbellard         if (l > len)
344513eb76e0Sbellard             l = len;
3446b448f2f3Sbellard         cpu_physical_memory_rw(phys_addr + (addr & ~TARGET_PAGE_MASK),
3447b448f2f3Sbellard                                buf, l, is_write);
344813eb76e0Sbellard         len -= l;
344913eb76e0Sbellard         buf += l;
345013eb76e0Sbellard         addr += l;
345113eb76e0Sbellard     }
345213eb76e0Sbellard     return 0;
345313eb76e0Sbellard }
345413eb76e0Sbellard 
/* in deterministic execution mode, instructions doing device I/Os
   must be at the end of the TB */
void cpu_io_recompile(CPUState *env, void *retaddr)
{
    TranslationBlock *tb;
    uint32_t n, cflags;
    target_ulong pc, cs_base;
    uint64_t flags;

    /* Find the TB that contains the host return address of the I/O
     * access; without it the CPU state cannot be reconstructed. */
    tb = tb_find_pc((unsigned long)retaddr);
    if (!tb) {
        cpu_abort(env, "cpu_io_recompile: could not find TB for pc=%p",
                  retaddr);
    }
    /* Total instruction budget of this TB at entry. */
    n = env->icount_decr.u16.low + tb->icount;
    cpu_restore_state(tb, env, (unsigned long)retaddr, NULL);
    /* Calculate how many instructions had been executed before the fault
       occurred.  */
    n = n - env->icount_decr.u16.low;
    /* Generate a new TB ending on the I/O insn.  */
    n++;
    /* On MIPS and SH, delay slot instructions can only be restarted if
       they were already the first instruction in the TB.  If this is not
       the first instruction in a TB then re-execute the preceding
       branch.  */
#if defined(TARGET_MIPS)
    if ((env->hflags & MIPS_HFLAG_BMASK) != 0 && n > 1) {
        /* Back up to the branch and refund one icount credit. */
        env->active_tc.PC -= 4;
        env->icount_decr.u16.low++;
        env->hflags &= ~MIPS_HFLAG_BMASK;
    }
#elif defined(TARGET_SH4)
    if ((env->flags & ((DELAY_SLOT | DELAY_SLOT_CONDITIONAL))) != 0
            && n > 1) {
        env->pc -= 2;
        env->icount_decr.u16.low++;
        env->flags &= ~(DELAY_SLOT | DELAY_SLOT_CONDITIONAL);
    }
#endif
    /* This should never happen.  */
    if (n > CF_COUNT_MASK)
        cpu_abort(env, "TB too big during recompile");

    /* Retranslate so the new TB holds exactly n instructions and ends
     * with the I/O one (CF_LAST_IO). */
    cflags = n | CF_LAST_IO;
    pc = tb->pc;
    cs_base = tb->cs_base;
    flags = tb->flags;
    tb_phys_invalidate(tb, -1);
    /* FIXME: In theory this could raise an exception.  In practice
       we have already translated the block once so it's probably ok.  */
    tb_gen_code(env, pc, cs_base, flags, cflags);
    /* TODO: If env->pc != tb->pc (i.e. the faulting instruction was not
       the first in the TB) then we end up generating a whole new TB and
       repeating the fault, which is horribly inefficient.
       Better would be to execute just this insn uncached, or generate a
       second new TB.  */
    cpu_resume_from_signal(env, NULL);
}
35132e70f6efSpbrook 
3514e3db7226Sbellard void dump_exec_info(FILE *f,
3515e3db7226Sbellard                     int (*cpu_fprintf)(FILE *f, const char *fmt, ...))
3516e3db7226Sbellard {
3517e3db7226Sbellard     int i, target_code_size, max_target_code_size;
3518e3db7226Sbellard     int direct_jmp_count, direct_jmp2_count, cross_page;
3519e3db7226Sbellard     TranslationBlock *tb;
3520e3db7226Sbellard 
3521e3db7226Sbellard     target_code_size = 0;
3522e3db7226Sbellard     max_target_code_size = 0;
3523e3db7226Sbellard     cross_page = 0;
3524e3db7226Sbellard     direct_jmp_count = 0;
3525e3db7226Sbellard     direct_jmp2_count = 0;
3526e3db7226Sbellard     for(i = 0; i < nb_tbs; i++) {
3527e3db7226Sbellard         tb = &tbs[i];
3528e3db7226Sbellard         target_code_size += tb->size;
3529e3db7226Sbellard         if (tb->size > max_target_code_size)
3530e3db7226Sbellard             max_target_code_size = tb->size;
3531e3db7226Sbellard         if (tb->page_addr[1] != -1)
3532e3db7226Sbellard             cross_page++;
3533e3db7226Sbellard         if (tb->tb_next_offset[0] != 0xffff) {
3534e3db7226Sbellard             direct_jmp_count++;
3535e3db7226Sbellard             if (tb->tb_next_offset[1] != 0xffff) {
3536e3db7226Sbellard                 direct_jmp2_count++;
3537e3db7226Sbellard             }
3538e3db7226Sbellard         }
3539e3db7226Sbellard     }
3540e3db7226Sbellard     /* XXX: avoid using doubles ? */
354157fec1feSbellard     cpu_fprintf(f, "Translation buffer state:\n");
354226a5f13bSbellard     cpu_fprintf(f, "gen code size       %ld/%ld\n",
354326a5f13bSbellard                 code_gen_ptr - code_gen_buffer, code_gen_buffer_max_size);
354426a5f13bSbellard     cpu_fprintf(f, "TB count            %d/%d\n",
354526a5f13bSbellard                 nb_tbs, code_gen_max_blocks);
3546e3db7226Sbellard     cpu_fprintf(f, "TB avg target size  %d max=%d bytes\n",
3547e3db7226Sbellard                 nb_tbs ? target_code_size / nb_tbs : 0,
3548e3db7226Sbellard                 max_target_code_size);
3549e3db7226Sbellard     cpu_fprintf(f, "TB avg host size    %d bytes (expansion ratio: %0.1f)\n",
3550e3db7226Sbellard                 nb_tbs ? (code_gen_ptr - code_gen_buffer) / nb_tbs : 0,
3551e3db7226Sbellard                 target_code_size ? (double) (code_gen_ptr - code_gen_buffer) / target_code_size : 0);
3552e3db7226Sbellard     cpu_fprintf(f, "cross page TB count %d (%d%%)\n",
3553e3db7226Sbellard             cross_page,
3554e3db7226Sbellard             nb_tbs ? (cross_page * 100) / nb_tbs : 0);
3555e3db7226Sbellard     cpu_fprintf(f, "direct jump count   %d (%d%%) (2 jumps=%d %d%%)\n",
3556e3db7226Sbellard                 direct_jmp_count,
3557e3db7226Sbellard                 nb_tbs ? (direct_jmp_count * 100) / nb_tbs : 0,
3558e3db7226Sbellard                 direct_jmp2_count,
3559e3db7226Sbellard                 nb_tbs ? (direct_jmp2_count * 100) / nb_tbs : 0);
356057fec1feSbellard     cpu_fprintf(f, "\nStatistics:\n");
3561e3db7226Sbellard     cpu_fprintf(f, "TB flush count      %d\n", tb_flush_count);
3562e3db7226Sbellard     cpu_fprintf(f, "TB invalidate count %d\n", tb_phys_invalidate_count);
3563e3db7226Sbellard     cpu_fprintf(f, "TLB flush count     %d\n", tlb_flush_count);
3564b67d9a52Sbellard     tcg_dump_info(f, cpu_fprintf);
3565e3db7226Sbellard }
3566e3db7226Sbellard 
#if !defined(CONFIG_USER_ONLY)

/* Instantiate the code-access ("_cmmu" suffix) soft-MMU helpers for
 * each access size (SHIFT 0..3 = 1, 2, 4, 8 bytes) by repeatedly
 * including softmmu_template.h.  SOFTMMU_CODE_ACCESS selects the
 * code-fetch variants and GETPC() is NULL because these are not called
 * from generated code. */
#define MMUSUFFIX _cmmu
#define GETPC() NULL
#define env cpu_single_env
#define SOFTMMU_CODE_ACCESS

#define SHIFT 0
#include "softmmu_template.h"

#define SHIFT 1
#include "softmmu_template.h"

#define SHIFT 2
#include "softmmu_template.h"

#define SHIFT 3
#include "softmmu_template.h"

#undef env

#endif
3589