xref: /qemu/system/physmem.c (revision b5fc909e020fa97891ed92b0f3118efe9d35ecd7)
154936004Sbellard /*
2fd6ce8f6Sbellard  *  virtual page mapping and translated block handling
354936004Sbellard  *
454936004Sbellard  *  Copyright (c) 2003 Fabrice Bellard
554936004Sbellard  *
654936004Sbellard  * This library is free software; you can redistribute it and/or
754936004Sbellard  * modify it under the terms of the GNU Lesser General Public
854936004Sbellard  * License as published by the Free Software Foundation; either
954936004Sbellard  * version 2 of the License, or (at your option) any later version.
1054936004Sbellard  *
1154936004Sbellard  * This library is distributed in the hope that it will be useful,
1254936004Sbellard  * but WITHOUT ANY WARRANTY; without even the implied warranty of
1354936004Sbellard  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
1454936004Sbellard  * Lesser General Public License for more details.
1554936004Sbellard  *
1654936004Sbellard  * You should have received a copy of the GNU Lesser General Public
1754936004Sbellard  * License along with this library; if not, write to the Free Software
1854936004Sbellard  * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
1954936004Sbellard  */
2067b915a5Sbellard #include "config.h"
21d5a8f07cSbellard #ifdef _WIN32
224fddf62aSths #define WIN32_LEAN_AND_MEAN
23d5a8f07cSbellard #include <windows.h>
24d5a8f07cSbellard #else
25a98d49b1Sbellard #include <sys/types.h>
26d5a8f07cSbellard #include <sys/mman.h>
27d5a8f07cSbellard #endif
2854936004Sbellard #include <stdlib.h>
2954936004Sbellard #include <stdio.h>
3054936004Sbellard #include <stdarg.h>
3154936004Sbellard #include <string.h>
3254936004Sbellard #include <errno.h>
3354936004Sbellard #include <unistd.h>
3454936004Sbellard #include <inttypes.h>
3554936004Sbellard 
366180a181Sbellard #include "cpu.h"
376180a181Sbellard #include "exec-all.h"
38ca10f867Saurel32 #include "qemu-common.h"
39b67d9a52Sbellard #include "tcg.h"
4053a5960aSpbrook #if defined(CONFIG_USER_ONLY)
4153a5960aSpbrook #include <qemu.h>
4253a5960aSpbrook #endif
4354936004Sbellard 
44fd6ce8f6Sbellard //#define DEBUG_TB_INVALIDATE
4566e85a21Sbellard //#define DEBUG_FLUSH
469fa3e853Sbellard //#define DEBUG_TLB
4767d3b957Spbrook //#define DEBUG_UNASSIGNED
48fd6ce8f6Sbellard 
49fd6ce8f6Sbellard /* make various TB consistency checks */
50fd6ce8f6Sbellard //#define DEBUG_TB_CHECK
5198857888Sbellard //#define DEBUG_TLB_CHECK
52fd6ce8f6Sbellard 
531196be37Sths //#define DEBUG_IOPORT
54db7b5426Sblueswir1 //#define DEBUG_SUBPAGE
551196be37Sths 
5699773bd4Spbrook #if !defined(CONFIG_USER_ONLY)
5799773bd4Spbrook /* TB consistency checks only implemented for usermode emulation.  */
5899773bd4Spbrook #undef DEBUG_TB_CHECK
5999773bd4Spbrook #endif
6099773bd4Spbrook 
619fa3e853Sbellard #define SMC_BITMAP_USE_THRESHOLD 10
629fa3e853Sbellard 
639fa3e853Sbellard #define MMAP_AREA_START        0x00000000
649fa3e853Sbellard #define MMAP_AREA_END          0xa8000000
65fd6ce8f6Sbellard 
66108c49b8Sbellard #if defined(TARGET_SPARC64)
67108c49b8Sbellard #define TARGET_PHYS_ADDR_SPACE_BITS 41
685dcb6b91Sblueswir1 #elif defined(TARGET_SPARC)
695dcb6b91Sblueswir1 #define TARGET_PHYS_ADDR_SPACE_BITS 36
70bedb69eaSj_mayer #elif defined(TARGET_ALPHA)
71bedb69eaSj_mayer #define TARGET_PHYS_ADDR_SPACE_BITS 42
72bedb69eaSj_mayer #define TARGET_VIRT_ADDR_SPACE_BITS 42
73108c49b8Sbellard #elif defined(TARGET_PPC64)
74108c49b8Sbellard #define TARGET_PHYS_ADDR_SPACE_BITS 42
7500f82b8aSaurel32 #elif defined(TARGET_X86_64) && !defined(USE_KQEMU)
7600f82b8aSaurel32 #define TARGET_PHYS_ADDR_SPACE_BITS 42
7700f82b8aSaurel32 #elif defined(TARGET_I386) && !defined(USE_KQEMU)
7800f82b8aSaurel32 #define TARGET_PHYS_ADDR_SPACE_BITS 36
79108c49b8Sbellard #else
80108c49b8Sbellard /* Note: for compatibility with kqemu, we use 32 bits for x86_64 */
81108c49b8Sbellard #define TARGET_PHYS_ADDR_SPACE_BITS 32
82108c49b8Sbellard #endif
83108c49b8Sbellard 
84fab94c0eSpbrook TranslationBlock *tbs;
8526a5f13bSbellard int code_gen_max_blocks;
869fa3e853Sbellard TranslationBlock *tb_phys_hash[CODE_GEN_PHYS_HASH_SIZE];
87fd6ce8f6Sbellard int nb_tbs;
88eb51d102Sbellard /* any access to the tbs or the page table must use this lock */
89eb51d102Sbellard spinlock_t tb_lock = SPIN_LOCK_UNLOCKED;
90fd6ce8f6Sbellard 
917cb69caeSbellard uint8_t code_gen_prologue[1024] __attribute__((aligned (32)));
9226a5f13bSbellard uint8_t *code_gen_buffer;
9326a5f13bSbellard unsigned long code_gen_buffer_size;
9426a5f13bSbellard /* threshold to flush the translated code buffer */
9526a5f13bSbellard unsigned long code_gen_buffer_max_size;
96fd6ce8f6Sbellard uint8_t *code_gen_ptr;
97fd6ce8f6Sbellard 
9800f82b8aSaurel32 ram_addr_t phys_ram_size;
999fa3e853Sbellard int phys_ram_fd;
1009fa3e853Sbellard uint8_t *phys_ram_base;
1011ccde1cbSbellard uint8_t *phys_ram_dirty;
102e9a1ab19Sbellard static ram_addr_t phys_ram_alloc_offset = 0;
1039fa3e853Sbellard 
1046a00d601Sbellard CPUState *first_cpu;
1056a00d601Sbellard /* current CPU in the current thread. It is only valid inside
1066a00d601Sbellard    cpu_exec() */
1076a00d601Sbellard CPUState *cpu_single_env;
1086a00d601Sbellard 
typedef struct PageDesc {
    /* list of TBs intersecting this ram page */
    TranslationBlock *first_tb;
    /* in order to optimize self modifying code, we count the number
       of lookups we do to a given page to use a bitmap */
    unsigned int code_write_count;
    /* code-presence bitmap for this page; NULL until built (see
       build_page_bitmap), freed by invalidate_page_bitmap().
       NOTE(review): presumably built once code_write_count crosses
       SMC_BITMAP_USE_THRESHOLD -- confirm at the call site */
    uint8_t *code_bitmap;
#if defined(CONFIG_USER_ONLY)
    /* per-page protection flags (PAGE_*) for user-mode emulation */
    unsigned long flags;
#endif
} PageDesc;
12054936004Sbellard 
typedef struct PhysPageDesc {
    /* offset in host memory of the page + io_index in the low 12 bits */
    /* defaults to IO_MEM_UNASSIGNED (set in phys_page_find_alloc) */
    ram_addr_t phys_offset;
} PhysPageDesc;
12592e873b9Sbellard 
12654936004Sbellard #define L2_BITS 10
127bedb69eaSj_mayer #if defined(CONFIG_USER_ONLY) && defined(TARGET_VIRT_ADDR_SPACE_BITS)
128bedb69eaSj_mayer /* XXX: this is a temporary hack for alpha target.
129bedb69eaSj_mayer  *      In the future, this is to be replaced by a multi-level table
130bedb69eaSj_mayer  *      to actually be able to handle the complete 64 bits address space.
131bedb69eaSj_mayer  */
132bedb69eaSj_mayer #define L1_BITS (TARGET_VIRT_ADDR_SPACE_BITS - L2_BITS - TARGET_PAGE_BITS)
133bedb69eaSj_mayer #else
13403875444Saurel32 #define L1_BITS (32 - L2_BITS - TARGET_PAGE_BITS)
135bedb69eaSj_mayer #endif
13654936004Sbellard 
13754936004Sbellard #define L1_SIZE (1 << L1_BITS)
13854936004Sbellard #define L2_SIZE (1 << L2_BITS)
13954936004Sbellard 
14033417e70Sbellard static void io_mem_init(void);
141fd6ce8f6Sbellard 
14283fb7adfSbellard unsigned long qemu_real_host_page_size;
14383fb7adfSbellard unsigned long qemu_host_page_bits;
14483fb7adfSbellard unsigned long qemu_host_page_size;
14583fb7adfSbellard unsigned long qemu_host_page_mask;
14654936004Sbellard 
14792e873b9Sbellard /* XXX: for system emulation, it could just be an array */
14854936004Sbellard static PageDesc *l1_map[L1_SIZE];
1490a962c02Sbellard PhysPageDesc **l1_phys_map;
15054936004Sbellard 
15133417e70Sbellard /* io memory support */
15233417e70Sbellard CPUWriteMemoryFunc *io_mem_write[IO_MEM_NB_ENTRIES][4];
15333417e70Sbellard CPUReadMemoryFunc *io_mem_read[IO_MEM_NB_ENTRIES][4];
154a4193c8aSbellard void *io_mem_opaque[IO_MEM_NB_ENTRIES];
15533417e70Sbellard static int io_mem_nb;
1566658ffb8Spbrook #if defined(CONFIG_SOFTMMU)
1576658ffb8Spbrook static int io_mem_watch;
1586658ffb8Spbrook #endif
15933417e70Sbellard 
16034865134Sbellard /* log support */
16134865134Sbellard char *logfilename = "/tmp/qemu.log";
16234865134Sbellard FILE *logfile;
16334865134Sbellard int loglevel;
164e735b91cSpbrook static int log_append = 0;
16534865134Sbellard 
166e3db7226Sbellard /* statistics */
167e3db7226Sbellard static int tlb_flush_count;
168e3db7226Sbellard static int tb_flush_count;
169e3db7226Sbellard static int tb_phys_invalidate_count;
170e3db7226Sbellard 
/* byte offset of 'addr' within its target page */
#define SUBPAGE_IDX(addr) ((addr) & ~TARGET_PAGE_MASK)
/* descriptor for a page whose I/O handling is sub-divided below page
   granularity: one handler slot per byte offset and per access-size
   index (4 entries -- NOTE(review): confirm the size-index meaning
   against the subpage registration code, not visible in this chunk) */
typedef struct subpage_t {
    target_phys_addr_t base;
    CPUReadMemoryFunc **mem_read[TARGET_PAGE_SIZE][4];
    CPUWriteMemoryFunc **mem_write[TARGET_PAGE_SIZE][4];
    void *opaque[TARGET_PAGE_SIZE][2][4];
} subpage_t;
178db7b5426Sblueswir1 
1797cb69caeSbellard #ifdef _WIN32
static void map_exec(void *addr, long size)
{
    /* Win32 variant: mark [addr, addr+size) read/write/executable.
       The previous protection is queried into old_protect (required by
       the API) but not used, and failure is ignored. */
    DWORD old_protect;
    VirtualProtect(addr, size,
                   PAGE_EXECUTE_READWRITE, &old_protect);

}
1877cb69caeSbellard #else
/* POSIX variant: make [addr, addr+size) read/write/executable.  The
   range is rounded outward to whole host pages since mprotect() only
   operates on page-aligned regions; the mprotect() result is ignored,
   matching the Win32 variant. */
static void map_exec(void *addr, long size)
{
    unsigned long first, last, mask;

    mask = getpagesize() - 1;
    first = (unsigned long)addr & ~mask;
    last = ((unsigned long)addr + size + mask) & ~mask;

    mprotect((void *)first, last - first,
             PROT_READ | PROT_WRITE | PROT_EXEC);
}
2037cb69caeSbellard #endif
2047cb69caeSbellard 
/* One-time initialization of host/target page-size globals and the
   physical page table; in user mode, additionally marks existing host
   mappings as reserved. */
static void page_init(void)
{
    /* NOTE: we can always suppose that qemu_host_page_size >=
       TARGET_PAGE_SIZE */
#ifdef _WIN32
    {
        SYSTEM_INFO system_info;
        DWORD old_protect;   /* NOTE(review): unused in this scope */

        GetSystemInfo(&system_info);
        qemu_real_host_page_size = system_info.dwPageSize;
    }
#else
    qemu_real_host_page_size = getpagesize();
#endif
    /* qemu_host_page_size may have been preset elsewhere; otherwise
       default it to the real host page size, clamped up to the target
       page size */
    if (qemu_host_page_size == 0)
        qemu_host_page_size = qemu_real_host_page_size;
    if (qemu_host_page_size < TARGET_PAGE_SIZE)
        qemu_host_page_size = TARGET_PAGE_SIZE;
    qemu_host_page_bits = 0;
    while ((1 << qemu_host_page_bits) < qemu_host_page_size)
        qemu_host_page_bits++;
    qemu_host_page_mask = ~(qemu_host_page_size - 1);
    /* top level of the physical page table, zeroed so lookups see
       "not yet allocated" */
    l1_phys_map = qemu_vmalloc(L1_SIZE * sizeof(void *));
    memset(l1_phys_map, 0, L1_SIZE * sizeof(void *));

#if !defined(_WIN32) && defined(CONFIG_USER_ONLY)
    {
        long long startaddr, endaddr;
        FILE *f;
        int n;

        /* scan the host's own mappings and flag them PAGE_RESERVED --
           presumably so guest mmap cannot collide with them; confirm
           against the mmap emulation code */
        f = fopen("/proc/self/maps", "r");
        if (f) {
            do {
                /* only lines matching "<start>-<end> ..." are used */
                n = fscanf (f, "%llx-%llx %*[^\n]\n", &startaddr, &endaddr);
                if (n == 2) {
                    /* clamp to the supported physical address space */
                    startaddr = MIN(startaddr,
                                    (1ULL << TARGET_PHYS_ADDR_SPACE_BITS) - 1);
                    endaddr = MIN(endaddr,
                                    (1ULL << TARGET_PHYS_ADDR_SPACE_BITS) - 1);
                    page_set_flags(startaddr & TARGET_PAGE_MASK,
                                   TARGET_PAGE_ALIGN(endaddr),
                                   PAGE_RESERVED);
                }
            } while (!feof(f));
            fclose(f);
        }
    }
#endif
}
25654936004Sbellard 
25700f82b8aSaurel32 static inline PageDesc *page_find_alloc(target_ulong index)
25854936004Sbellard {
25954936004Sbellard     PageDesc **lp, *p;
26054936004Sbellard 
26154936004Sbellard     lp = &l1_map[index >> L2_BITS];
26254936004Sbellard     p = *lp;
26354936004Sbellard     if (!p) {
26454936004Sbellard         /* allocate if not found */
26559817ccbSbellard         p = qemu_malloc(sizeof(PageDesc) * L2_SIZE);
266fd6ce8f6Sbellard         memset(p, 0, sizeof(PageDesc) * L2_SIZE);
26754936004Sbellard         *lp = p;
26854936004Sbellard     }
26954936004Sbellard     return p + (index & (L2_SIZE - 1));
27054936004Sbellard }
27154936004Sbellard 
27200f82b8aSaurel32 static inline PageDesc *page_find(target_ulong index)
27354936004Sbellard {
27454936004Sbellard     PageDesc *p;
27554936004Sbellard 
27654936004Sbellard     p = l1_map[index >> L2_BITS];
27754936004Sbellard     if (!p)
27854936004Sbellard         return 0;
279fd6ce8f6Sbellard     return p + (index & (L2_SIZE - 1));
28054936004Sbellard }
28154936004Sbellard 
/* Return the PhysPageDesc slot for physical page 'index'.  With
   'alloc' non-zero, missing table levels are created on demand;
   otherwise NULL is returned for pages that were never mapped.  The
   table has two levels (three when the physical address space exceeds
   32 bits). */
static PhysPageDesc *phys_page_find_alloc(target_phys_addr_t index, int alloc)
{
    void **lp, **p;
    PhysPageDesc *pd;

    p = (void **)l1_phys_map;
#if TARGET_PHYS_ADDR_SPACE_BITS > 32

#if TARGET_PHYS_ADDR_SPACE_BITS > (32 + L1_BITS)
#error unsupported TARGET_PHYS_ADDR_SPACE_BITS
#endif
    /* extra top level for >32-bit physical address spaces */
    lp = p + ((index >> (L1_BITS + L2_BITS)) & (L1_SIZE - 1));
    p = *lp;
    if (!p) {
        /* allocate if not found */
        if (!alloc)
            return NULL;
        p = qemu_vmalloc(sizeof(void *) * L1_SIZE);
        memset(p, 0, sizeof(void *) * L1_SIZE);
        *lp = p;
    }
#endif
    lp = p + ((index >> L2_BITS) & (L1_SIZE - 1));
    pd = *lp;
    if (!pd) {
        int i;
        /* allocate if not found */
        if (!alloc)
            return NULL;
        pd = qemu_vmalloc(sizeof(PhysPageDesc) * L2_SIZE);
        *lp = pd;
        /* fresh pages have no backing until registered */
        for (i = 0; i < L2_SIZE; i++)
          pd[i].phys_offset = IO_MEM_UNASSIGNED;
    }
    return ((PhysPageDesc *)pd) + (index & (L2_SIZE - 1));
}
31892e873b9Sbellard 
/* Non-allocating lookup: returns NULL if physical page 'index' has
   never been registered. */
static inline PhysPageDesc *phys_page_find(target_phys_addr_t index)
{
    return phys_page_find_alloc(index, 0);
}
32392e873b9Sbellard 
3249fa3e853Sbellard #if !defined(CONFIG_USER_ONLY)
3256a00d601Sbellard static void tlb_protect_code(ram_addr_t ram_addr);
3263a7d929eSbellard static void tlb_unprotect_code_phys(CPUState *env, ram_addr_t ram_addr,
3273a7d929eSbellard                                     target_ulong vaddr);
3289fa3e853Sbellard #endif
329fd6ce8f6Sbellard 
3304369415fSbellard #define DEFAULT_CODE_GEN_BUFFER_SIZE (32 * 1024 * 1024)
3314369415fSbellard 
3324369415fSbellard #if defined(CONFIG_USER_ONLY)
3334369415fSbellard /* Currently it is not recommended to allocate big chunks of data in
3344369415fSbellard    user mode. It will change when a dedicated libc is used */
3354369415fSbellard #define USE_STATIC_CODE_GEN_BUFFER
3364369415fSbellard #endif
3374369415fSbellard 
3384369415fSbellard #ifdef USE_STATIC_CODE_GEN_BUFFER
3394369415fSbellard static uint8_t static_code_gen_buffer[DEFAULT_CODE_GEN_BUFFER_SIZE];
3404369415fSbellard #endif
3414369415fSbellard 
/* Allocate the translated-code buffer ('tb_size' bytes, 0 = default),
   make it executable, and size the TB array accordingly.  Host/OS
   specific constraints (e.g. x86-64 needing the buffer in the low
   32-bit range) are handled here. */
void code_gen_alloc(unsigned long tb_size)
{
#ifdef USE_STATIC_CODE_GEN_BUFFER
    /* user mode: use the statically allocated buffer */
    code_gen_buffer = static_code_gen_buffer;
    code_gen_buffer_size = DEFAULT_CODE_GEN_BUFFER_SIZE;
    map_exec(code_gen_buffer, code_gen_buffer_size);
#else
    code_gen_buffer_size = tb_size;
    if (code_gen_buffer_size == 0) {
#if defined(CONFIG_USER_ONLY)
        /* in user mode, phys_ram_size is not meaningful */
        code_gen_buffer_size = DEFAULT_CODE_GEN_BUFFER_SIZE;
#else
        /* XXX: needs adjustments */
        code_gen_buffer_size = (int)(phys_ram_size / 4);
#endif
    }
    if (code_gen_buffer_size < MIN_CODE_GEN_BUFFER_SIZE)
        code_gen_buffer_size = MIN_CODE_GEN_BUFFER_SIZE;
    /* The code gen buffer location may have constraints depending on
       the host cpu and OS */
#if defined(__linux__)
    {
        int flags;
        flags = MAP_PRIVATE | MAP_ANONYMOUS;
#if defined(__x86_64__)
        /* MAP_32BIT keeps the buffer reachable by 32-bit branches */
        flags |= MAP_32BIT;
        /* Cannot map more than that */
        if (code_gen_buffer_size > (800 * 1024 * 1024))
            code_gen_buffer_size = (800 * 1024 * 1024);
#endif
        /* mmap directly with PROT_EXEC; no separate map_exec() needed */
        code_gen_buffer = mmap(NULL, code_gen_buffer_size,
                               PROT_WRITE | PROT_READ | PROT_EXEC,
                               flags, -1, 0);
        if (code_gen_buffer == MAP_FAILED) {
            fprintf(stderr, "Could not allocate dynamic translator buffer\n");
            exit(1);
        }
    }
#else
    code_gen_buffer = qemu_malloc(code_gen_buffer_size);
    if (!code_gen_buffer) {
        fprintf(stderr, "Could not allocate dynamic translator buffer\n");
        exit(1);
    }
    map_exec(code_gen_buffer, code_gen_buffer_size);
#endif
#endif /* !USE_STATIC_CODE_GEN_BUFFER */
    map_exec(code_gen_prologue, sizeof(code_gen_prologue));
    /* keep headroom for one maximum-size TB at the end of the buffer */
    code_gen_buffer_max_size = code_gen_buffer_size -
        code_gen_max_block_size();
    code_gen_max_blocks = code_gen_buffer_size / CODE_GEN_AVG_BLOCK_SIZE;
    /* NOTE(review): this qemu_malloc result is not checked, unlike the
       buffer allocation above */
    tbs = qemu_malloc(code_gen_max_blocks * sizeof(TranslationBlock));
}
39626a5f13bSbellard 
/* Must be called before using the QEMU cpus. 'tb_size' is the size
   (in bytes) allocated to the translation buffer. Zero means default
   size. */
void cpu_exec_init_all(unsigned long tb_size)
{
    cpu_gen_init();
    code_gen_alloc(tb_size);
    /* translation output starts at the base of the fresh buffer */
    code_gen_ptr = code_gen_buffer;
    page_init();
    io_mem_init();
}
40826a5f13bSbellard 
4096a00d601Sbellard void cpu_exec_init(CPUState *env)
410fd6ce8f6Sbellard {
4116a00d601Sbellard     CPUState **penv;
4126a00d601Sbellard     int cpu_index;
4136a00d601Sbellard 
4146a00d601Sbellard     env->next_cpu = NULL;
4156a00d601Sbellard     penv = &first_cpu;
4166a00d601Sbellard     cpu_index = 0;
4176a00d601Sbellard     while (*penv != NULL) {
4186a00d601Sbellard         penv = (CPUState **)&(*penv)->next_cpu;
4196a00d601Sbellard         cpu_index++;
4206a00d601Sbellard     }
4216a00d601Sbellard     env->cpu_index = cpu_index;
4226658ffb8Spbrook     env->nb_watchpoints = 0;
4236a00d601Sbellard     *penv = env;
424fd6ce8f6Sbellard }
425fd6ce8f6Sbellard 
/* Drop the cached code bitmap for a page (if any) and reset the write
   counter so the self-modifying-code heuristic starts over. */
static inline void invalidate_page_bitmap(PageDesc *p)
{
    if (p->code_bitmap) {
        qemu_free(p->code_bitmap);
        p->code_bitmap = NULL;
    }
    p->code_write_count = 0;
}
4349fa3e853Sbellard 
435fd6ce8f6Sbellard /* set to NULL all the 'first_tb' fields in all PageDescs */
436fd6ce8f6Sbellard static void page_flush_tb(void)
437fd6ce8f6Sbellard {
438fd6ce8f6Sbellard     int i, j;
439fd6ce8f6Sbellard     PageDesc *p;
440fd6ce8f6Sbellard 
441fd6ce8f6Sbellard     for(i = 0; i < L1_SIZE; i++) {
442fd6ce8f6Sbellard         p = l1_map[i];
443fd6ce8f6Sbellard         if (p) {
4449fa3e853Sbellard             for(j = 0; j < L2_SIZE; j++) {
4459fa3e853Sbellard                 p->first_tb = NULL;
4469fa3e853Sbellard                 invalidate_page_bitmap(p);
4479fa3e853Sbellard                 p++;
4489fa3e853Sbellard             }
449fd6ce8f6Sbellard         }
450fd6ce8f6Sbellard     }
451fd6ce8f6Sbellard }
452fd6ce8f6Sbellard 
/* flush all the translation blocks */
/* XXX: tb_flush is currently not thread safe */
void tb_flush(CPUState *env1)
{
    CPUState *env;
#if defined(DEBUG_FLUSH)
    printf("qemu: flush code_size=%ld nb_tbs=%d avg_tb_size=%ld\n",
           (unsigned long)(code_gen_ptr - code_gen_buffer),
           nb_tbs, nb_tbs > 0 ?
           ((unsigned long)(code_gen_ptr - code_gen_buffer)) / nb_tbs : 0);
#endif
    /* the write pointer must still lie inside the code buffer;
       anything else means a generated TB overran it */
    if ((unsigned long)(code_gen_ptr - code_gen_buffer) > code_gen_buffer_size)
        cpu_abort(env1, "Internal error: code buffer overflow\n");

    nb_tbs = 0;

    /* wipe every CPU's TB lookup cache so no stale pointers survive */
    for(env = first_cpu; env != NULL; env = env->next_cpu) {
        memset (env->tb_jmp_cache, 0, TB_JMP_CACHE_SIZE * sizeof (void *));
    }

    memset (tb_phys_hash, 0, CODE_GEN_PHYS_HASH_SIZE * sizeof (void *));
    page_flush_tb();

    /* start translating again from the base of the buffer */
    code_gen_ptr = code_gen_buffer;
    /* XXX: flush processor icache at this point if cache flush is
       expensive */
    tb_flush_count++;
}
481fd6ce8f6Sbellard 
482fd6ce8f6Sbellard #ifdef DEBUG_TB_CHECK
483fd6ce8f6Sbellard 
484bc98a7efSj_mayer static void tb_invalidate_check(target_ulong address)
485fd6ce8f6Sbellard {
486fd6ce8f6Sbellard     TranslationBlock *tb;
487fd6ce8f6Sbellard     int i;
488fd6ce8f6Sbellard     address &= TARGET_PAGE_MASK;
48999773bd4Spbrook     for(i = 0;i < CODE_GEN_PHYS_HASH_SIZE; i++) {
49099773bd4Spbrook         for(tb = tb_phys_hash[i]; tb != NULL; tb = tb->phys_hash_next) {
491fd6ce8f6Sbellard             if (!(address + TARGET_PAGE_SIZE <= tb->pc ||
492fd6ce8f6Sbellard                   address >= tb->pc + tb->size)) {
493fd6ce8f6Sbellard                 printf("ERROR invalidate: address=%08lx PC=%08lx size=%04x\n",
49499773bd4Spbrook                        address, (long)tb->pc, tb->size);
495fd6ce8f6Sbellard             }
496fd6ce8f6Sbellard         }
497fd6ce8f6Sbellard     }
498fd6ce8f6Sbellard }
499fd6ce8f6Sbellard 
500fd6ce8f6Sbellard /* verify that all the pages have correct rights for code */
501fd6ce8f6Sbellard static void tb_page_check(void)
502fd6ce8f6Sbellard {
503fd6ce8f6Sbellard     TranslationBlock *tb;
504fd6ce8f6Sbellard     int i, flags1, flags2;
505fd6ce8f6Sbellard 
50699773bd4Spbrook     for(i = 0;i < CODE_GEN_PHYS_HASH_SIZE; i++) {
50799773bd4Spbrook         for(tb = tb_phys_hash[i]; tb != NULL; tb = tb->phys_hash_next) {
508fd6ce8f6Sbellard             flags1 = page_get_flags(tb->pc);
509fd6ce8f6Sbellard             flags2 = page_get_flags(tb->pc + tb->size - 1);
510fd6ce8f6Sbellard             if ((flags1 & PAGE_WRITE) || (flags2 & PAGE_WRITE)) {
511fd6ce8f6Sbellard                 printf("ERROR page flags: PC=%08lx size=%04x f1=%x f2=%x\n",
51299773bd4Spbrook                        (long)tb->pc, tb->size, flags1, flags2);
513fd6ce8f6Sbellard             }
514fd6ce8f6Sbellard         }
515fd6ce8f6Sbellard     }
516fd6ce8f6Sbellard }
517fd6ce8f6Sbellard 
518d4e8164fSbellard void tb_jmp_check(TranslationBlock *tb)
519d4e8164fSbellard {
520d4e8164fSbellard     TranslationBlock *tb1;
521d4e8164fSbellard     unsigned int n1;
522d4e8164fSbellard 
523d4e8164fSbellard     /* suppress any remaining jumps to this TB */
524d4e8164fSbellard     tb1 = tb->jmp_first;
525d4e8164fSbellard     for(;;) {
526d4e8164fSbellard         n1 = (long)tb1 & 3;
527d4e8164fSbellard         tb1 = (TranslationBlock *)((long)tb1 & ~3);
528d4e8164fSbellard         if (n1 == 2)
529d4e8164fSbellard             break;
530d4e8164fSbellard         tb1 = tb1->jmp_next[n1];
531d4e8164fSbellard     }
532d4e8164fSbellard     /* check end of list */
533d4e8164fSbellard     if (tb1 != tb) {
534d4e8164fSbellard         printf("ERROR: jmp_list from 0x%08lx\n", (long)tb);
535d4e8164fSbellard     }
536d4e8164fSbellard }
537d4e8164fSbellard 
538fd6ce8f6Sbellard #endif
539fd6ce8f6Sbellard 
540fd6ce8f6Sbellard /* invalidate one TB */
541fd6ce8f6Sbellard static inline void tb_remove(TranslationBlock **ptb, TranslationBlock *tb,
542fd6ce8f6Sbellard                              int next_offset)
543fd6ce8f6Sbellard {
544fd6ce8f6Sbellard     TranslationBlock *tb1;
545fd6ce8f6Sbellard     for(;;) {
546fd6ce8f6Sbellard         tb1 = *ptb;
547fd6ce8f6Sbellard         if (tb1 == tb) {
548fd6ce8f6Sbellard             *ptb = *(TranslationBlock **)((char *)tb1 + next_offset);
549fd6ce8f6Sbellard             break;
550fd6ce8f6Sbellard         }
551fd6ce8f6Sbellard         ptb = (TranslationBlock **)((char *)tb1 + next_offset);
552fd6ce8f6Sbellard     }
553fd6ce8f6Sbellard }
554fd6ce8f6Sbellard 
5559fa3e853Sbellard static inline void tb_page_remove(TranslationBlock **ptb, TranslationBlock *tb)
5569fa3e853Sbellard {
5579fa3e853Sbellard     TranslationBlock *tb1;
5589fa3e853Sbellard     unsigned int n1;
5599fa3e853Sbellard 
5609fa3e853Sbellard     for(;;) {
5619fa3e853Sbellard         tb1 = *ptb;
5629fa3e853Sbellard         n1 = (long)tb1 & 3;
5639fa3e853Sbellard         tb1 = (TranslationBlock *)((long)tb1 & ~3);
5649fa3e853Sbellard         if (tb1 == tb) {
5659fa3e853Sbellard             *ptb = tb1->page_next[n1];
5669fa3e853Sbellard             break;
5679fa3e853Sbellard         }
5689fa3e853Sbellard         ptb = &tb1->page_next[n1];
5699fa3e853Sbellard     }
5709fa3e853Sbellard }
5719fa3e853Sbellard 
/* Remove jump slot 'n' of 'tb' from the circular list of TBs that jump
   into the same target.  List entries are tagged pointers: the low 2
   bits hold the jump-slot number (0/1), with 2 marking the terminator
   that points back at the target TB. */
static inline void tb_jmp_remove(TranslationBlock *tb, int n)
{
    TranslationBlock *tb1, **ptb;
    unsigned int n1;

    ptb = &tb->jmp_next[n];
    tb1 = *ptb;
    if (tb1) {
        /* find tb(n) in circular list */
        for(;;) {
            tb1 = *ptb;
            n1 = (long)tb1 & 3;
            tb1 = (TranslationBlock *)((long)tb1 & ~3);
            if (n1 == n && tb1 == tb)
                break;
            if (n1 == 2) {
                /* terminator: continue through the target's jmp_first */
                ptb = &tb1->jmp_first;
            } else {
                ptb = &tb1->jmp_next[n1];
            }
        }
        /* now we can suppress tb(n) from the list */
        *ptb = tb->jmp_next[n];

        tb->jmp_next[n] = NULL;
    }
}
599d4e8164fSbellard 
/* reset the jump entry 'n' of a TB so that it is not chained to
   another TB */
static inline void tb_reset_jump(TranslationBlock *tb, int n)
{
    /* retarget the jump at offset tb_next_offset[n] inside this TB's
       own generated code, i.e. back to its exit path */
    tb_set_jmp_target(tb, n, (unsigned long)(tb->tc_ptr + tb->tb_next_offset[n]));
}
606d4e8164fSbellard 
/* Fully retire one TB: unhook it from the physical hash table, from
   the per-page TB lists, from every CPU's lookup cache, and from all
   jump chains in both directions.  Page lists are updated only for
   pages different from 'page_addr' -- presumably the caller is already
   walking/clearing that page's own list (confirm at call sites). */
static inline void tb_phys_invalidate(TranslationBlock *tb, target_ulong page_addr)
{
    CPUState *env;
    PageDesc *p;
    unsigned int h, n1;
    target_phys_addr_t phys_pc;
    TranslationBlock *tb1, *tb2;

    /* remove the TB from the hash list */
    phys_pc = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
    h = tb_phys_hash_func(phys_pc);
    tb_remove(&tb_phys_hash[h], tb,
              offsetof(TranslationBlock, phys_hash_next));

    /* remove the TB from the page list */
    if (tb->page_addr[0] != page_addr) {
        p = page_find(tb->page_addr[0] >> TARGET_PAGE_BITS);
        tb_page_remove(&p->first_tb, tb);
        invalidate_page_bitmap(p);
    }
    /* a TB may span two pages; page_addr[1] == -1 means it does not */
    if (tb->page_addr[1] != -1 && tb->page_addr[1] != page_addr) {
        p = page_find(tb->page_addr[1] >> TARGET_PAGE_BITS);
        tb_page_remove(&p->first_tb, tb);
        invalidate_page_bitmap(p);
    }

    tb_invalidated_flag = 1;

    /* remove the TB from the hash list */
    h = tb_jmp_cache_hash_func(tb->pc);
    for(env = first_cpu; env != NULL; env = env->next_cpu) {
        if (env->tb_jmp_cache[h] == tb)
            env->tb_jmp_cache[h] = NULL;
    }

    /* suppress this TB from the two jump lists */
    tb_jmp_remove(tb, 0);
    tb_jmp_remove(tb, 1);

    /* suppress any remaining jumps to this TB */
    tb1 = tb->jmp_first;
    for(;;) {
        n1 = (long)tb1 & 3;   /* low bits: jump slot, 2 = terminator */
        if (n1 == 2)
            break;
        tb1 = (TranslationBlock *)((long)tb1 & ~3);
        tb2 = tb1->jmp_next[n1];
        tb_reset_jump(tb1, n1);
        tb1->jmp_next[n1] = NULL;
        tb1 = tb2;
    }
    tb->jmp_first = (TranslationBlock *)((long)tb | 2); /* fail safe */

    tb_phys_invalidate_count++;
}
6629fa3e853Sbellard 
/* Set 'len' consecutive bits of the bitmap 'tab', starting at bit
   index 'start'.  Bit i lives in byte i >> 3 at position i & 7. */
static inline void set_bits(uint8_t *tab, int start, int len)
{
    int last, head_mask, full_end;

    last = start + len;
    tab += start >> 3;
    head_mask = 0xff << (start & 7);
    if ((start & ~7) != (last & ~7)) {
        /* range crosses a byte boundary: partial head byte, a run of
           full bytes, then a partial tail byte */
        *tab++ |= head_mask;
        start = (start + 8) & ~7;
        full_end = last & ~7;
        for (; start < full_end; start += 8)
            *tab++ = 0xff;
        if (start < last)
            *tab |= ~(0xff << (last & 7));
    } else if (start < last) {
        /* whole range fits inside a single byte */
        *tab |= head_mask & ~(0xff << (last & 7));
    }
}
6899fa3e853Sbellard 
/* Build the per-page code bitmap of 'p': one bit per byte of the
   target page, set when the byte is covered by a translated block.
   Used by tb_invalidate_phys_page_fast() to cheaply decide whether a
   small write actually touches translated code. */
static void build_page_bitmap(PageDesc *p)
{
    int n, tb_start, tb_end;
    TranslationBlock *tb;

    p->code_bitmap = qemu_malloc(TARGET_PAGE_SIZE / 8);
    if (!p->code_bitmap)
        return;
    memset(p->code_bitmap, 0, TARGET_PAGE_SIZE / 8);

    tb = p->first_tb;
    while (tb != NULL) {
        /* the low 2 bits of the list pointer encode which of the TB's
           pages (0 or 1) this list entry refers to */
        n = (long)tb & 3;
        tb = (TranslationBlock *)((long)tb & ~3);
        /* NOTE: this is subtle as a TB may span two physical pages */
        if (n == 0) {
            /* NOTE: tb_end may be after the end of the page, but
               it is not a problem */
            tb_start = tb->pc & ~TARGET_PAGE_MASK;
            tb_end = tb_start + tb->size;
            if (tb_end > TARGET_PAGE_SIZE)
                tb_end = TARGET_PAGE_SIZE;
        } else {
            /* second page of a spanning TB: starts at the page base */
            tb_start = 0;
            tb_end = ((tb->pc + tb->size) & ~TARGET_PAGE_MASK);
        }
        set_bits(p->code_bitmap, tb_start, tb_end - tb_start);
        tb = tb->page_next[n];
    }
}
7209fa3e853Sbellard 
721d720b93dSbellard #ifdef TARGET_HAS_PRECISE_SMC
722d720b93dSbellard 
/* Translate a new TB starting at (cs_base, pc) with the given flags
   and link it into the physical page tables.  Only used here by the
   precise-SMC path to build a CF_SINGLE_INSN block for the faulting
   instruction. */
static void tb_gen_code(CPUState *env,
                        target_ulong pc, target_ulong cs_base, int flags,
                        int cflags)
{
    TranslationBlock *tb;
    uint8_t *tc_ptr;
    target_ulong phys_pc, phys_page2, virt_page2;
    int code_gen_size;

    phys_pc = get_phys_addr_code(env, pc);
    tb = tb_alloc(pc);
    if (!tb) {
        /* flush must be done */
        tb_flush(env);
        /* cannot fail at this point */
        tb = tb_alloc(pc);
    }
    tc_ptr = code_gen_ptr;
    tb->tc_ptr = tc_ptr;
    tb->cs_base = cs_base;
    tb->flags = flags;
    tb->cflags = cflags;
    cpu_gen_code(env, tb, &code_gen_size);
    /* advance the shared generated-code pointer, keeping it aligned */
    code_gen_ptr = (void *)(((unsigned long)code_gen_ptr + code_gen_size + CODE_GEN_ALIGN - 1) & ~(CODE_GEN_ALIGN - 1));

    /* check next page if needed */
    virt_page2 = (pc + tb->size - 1) & TARGET_PAGE_MASK;
    phys_page2 = -1;
    if ((pc & TARGET_PAGE_MASK) != virt_page2) {
        phys_page2 = get_phys_addr_code(env, virt_page2);
    }
    tb_link_phys(tb, phys_pc, phys_page2);
}
756d720b93dSbellard #endif
757d720b93dSbellard 
7589fa3e853Sbellard /* invalidate all TBs which intersect with the target physical page
7599fa3e853Sbellard    starting in range [start;end[. NOTE: start and end must refer to
760d720b93dSbellard    the same physical page. 'is_cpu_write_access' should be true if called
761d720b93dSbellard    from a real cpu write access: the virtual CPU will exit the current
762d720b93dSbellard    TB if code is modified inside this TB. */
void tb_invalidate_phys_page_range(target_phys_addr_t start, target_phys_addr_t end,
                                   int is_cpu_write_access)
{
    int n, current_tb_modified, current_tb_not_found, current_flags;
    CPUState *env = cpu_single_env;
    PageDesc *p;
    TranslationBlock *tb, *tb_next, *current_tb, *saved_tb;
    target_ulong tb_start, tb_end;
    target_ulong current_pc, current_cs_base;

    p = page_find(start >> TARGET_PAGE_BITS);
    if (!p)
        return;
    /* after enough write faults on this page, build the code bitmap so
       future small writes can be filtered cheaply */
    if (!p->code_bitmap &&
        ++p->code_write_count >= SMC_BITMAP_USE_THRESHOLD &&
        is_cpu_write_access) {
        /* build code bitmap */
        build_page_bitmap(p);
    }

    /* we remove all the TBs in the range [start, end[ */
    /* XXX: see if in some cases it could be faster to invalidate all the code */
    current_tb_not_found = is_cpu_write_access;
    current_tb_modified = 0;
    current_tb = NULL; /* avoid warning */
    current_pc = 0; /* avoid warning */
    current_cs_base = 0; /* avoid warning */
    current_flags = 0; /* avoid warning */
    tb = p->first_tb;
    while (tb != NULL) {
        /* low 2 bits of the list pointer select which of the TB's two
           pages this entry belongs to */
        n = (long)tb & 3;
        tb = (TranslationBlock *)((long)tb & ~3);
        tb_next = tb->page_next[n];
        /* NOTE: this is subtle as a TB may span two physical pages */
        if (n == 0) {
            /* NOTE: tb_end may be after the end of the page, but
               it is not a problem */
            tb_start = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
            tb_end = tb_start + tb->size;
        } else {
            tb_start = tb->page_addr[1];
            tb_end = tb_start + ((tb->pc + tb->size) & ~TARGET_PAGE_MASK);
        }
        /* only invalidate TBs that overlap [start, end[ */
        if (!(tb_end <= start || tb_start >= end)) {
#ifdef TARGET_HAS_PRECISE_SMC
            if (current_tb_not_found) {
                current_tb_not_found = 0;
                current_tb = NULL;
                if (env->mem_write_pc) {
                    /* now we have a real cpu fault */
                    current_tb = tb_find_pc(env->mem_write_pc);
                }
            }
            if (current_tb == tb &&
                !(current_tb->cflags & CF_SINGLE_INSN)) {
                /* If we are modifying the current TB, we must stop
                its execution. We could be more precise by checking
                that the modification is after the current PC, but it
                would require a specialized function to partially
                restore the CPU state */

                current_tb_modified = 1;
                cpu_restore_state(current_tb, env,
                                  env->mem_write_pc, NULL);
#if defined(TARGET_I386)
                current_flags = env->hflags;
                current_flags |= (env->eflags & (IOPL_MASK | TF_MASK | VM_MASK));
                current_cs_base = (target_ulong)env->segs[R_CS].base;
                current_pc = current_cs_base + env->eip;
#else
#error unsupported CPU
#endif
            }
#endif /* TARGET_HAS_PRECISE_SMC */
            /* we need to do that to handle the case where a signal
               occurs while doing tb_phys_invalidate() */
            saved_tb = NULL;
            if (env) {
                saved_tb = env->current_tb;
                env->current_tb = NULL;
            }
            tb_phys_invalidate(tb, -1);
            if (env) {
                env->current_tb = saved_tb;
                if (env->interrupt_request && env->current_tb)
                    cpu_interrupt(env, env->interrupt_request);
            }
        }
        tb = tb_next;
    }
#if !defined(CONFIG_USER_ONLY)
    /* if no code remaining, no need to continue to use slow writes */
    if (!p->first_tb) {
        invalidate_page_bitmap(p);
        if (is_cpu_write_access) {
            tlb_unprotect_code_phys(env, start, env->mem_write_vaddr);
        }
    }
#endif
#ifdef TARGET_HAS_PRECISE_SMC
    if (current_tb_modified) {
        /* we generate a block containing just the instruction
           modifying the memory. It will ensure that it cannot modify
           itself */
        env->current_tb = NULL;
        tb_gen_code(env, current_pc, current_cs_base, current_flags,
                    CF_SINGLE_INSN);
        cpu_resume_from_signal(env, NULL);
    }
#endif
}
8749fa3e853Sbellard 
8759fa3e853Sbellard /* len must be <= 8 and start must be a multiple of len */
/* Fast path for small writes: consult the page's code bitmap (when it
   exists) and only fall back to the full range invalidation when the
   written bytes actually overlap translated code. */
static inline void tb_invalidate_phys_page_fast(target_phys_addr_t start, int len)
{
    PageDesc *p;
    int offset, b;
#if 0
    if (1) {
        if (loglevel) {
            fprintf(logfile, "modifying code at 0x%x size=%d EIP=%x PC=%08x\n",
                   cpu_single_env->mem_write_vaddr, len,
                   cpu_single_env->eip,
                   cpu_single_env->eip + (long)cpu_single_env->segs[R_CS].base);
        }
    }
#endif
    p = page_find(start >> TARGET_PAGE_BITS);
    if (!p)
        return;
    if (p->code_bitmap) {
        /* shift the bitmap byte so bit 0 is the first written byte,
           then test the 'len' low bits */
        offset = start & ~TARGET_PAGE_MASK;
        b = p->code_bitmap[offset >> 3] >> (offset & 7);
        if (b & ((1 << len) - 1))
            goto do_invalidate;
    } else {
    do_invalidate:
        tb_invalidate_phys_page_range(start, start + len, 1);
    }
}
9039fa3e853Sbellard 
9049fa3e853Sbellard #if !defined(CONFIG_SOFTMMU)
/* Invalidate every TB on the page containing 'addr'.  'pc'/'puc'
   describe the faulting write context when called from a signal
   handler; pc == 0 means no CPU context is available. */
static void tb_invalidate_phys_page(target_phys_addr_t addr,
                                    unsigned long pc, void *puc)
{
    int n, current_flags, current_tb_modified;
    target_ulong current_pc, current_cs_base;
    PageDesc *p;
    TranslationBlock *tb, *current_tb;
#ifdef TARGET_HAS_PRECISE_SMC
    CPUState *env = cpu_single_env;
#endif

    addr &= TARGET_PAGE_MASK;
    p = page_find(addr >> TARGET_PAGE_BITS);
    if (!p)
        return;
    tb = p->first_tb;
    current_tb_modified = 0;
    current_tb = NULL;
    current_pc = 0; /* avoid warning */
    current_cs_base = 0; /* avoid warning */
    current_flags = 0; /* avoid warning */
#ifdef TARGET_HAS_PRECISE_SMC
    if (tb && pc != 0) {
        current_tb = tb_find_pc(pc);
    }
#endif
    while (tb != NULL) {
        /* low 2 bits of the list pointer select the TB page (0 or 1) */
        n = (long)tb & 3;
        tb = (TranslationBlock *)((long)tb & ~3);
#ifdef TARGET_HAS_PRECISE_SMC
        if (current_tb == tb &&
            !(current_tb->cflags & CF_SINGLE_INSN)) {
                /* If we are modifying the current TB, we must stop
                   its execution. We could be more precise by checking
                   that the modification is after the current PC, but it
                   would require a specialized function to partially
                   restore the CPU state */

            current_tb_modified = 1;
            cpu_restore_state(current_tb, env, pc, puc);
#if defined(TARGET_I386)
            current_flags = env->hflags;
            current_flags |= (env->eflags & (IOPL_MASK | TF_MASK | VM_MASK));
            current_cs_base = (target_ulong)env->segs[R_CS].base;
            current_pc = current_cs_base + env->eip;
#else
#error unsupported CPU
#endif
        }
#endif /* TARGET_HAS_PRECISE_SMC */
        tb_phys_invalidate(tb, addr);
        tb = tb->page_next[n];
    }
    p->first_tb = NULL;
#ifdef TARGET_HAS_PRECISE_SMC
    if (current_tb_modified) {
        /* we generate a block containing just the instruction
           modifying the memory. It will ensure that it cannot modify
           itself */
        env->current_tb = NULL;
        tb_gen_code(env, current_pc, current_cs_base, current_flags,
                    CF_SINGLE_INSN);
        cpu_resume_from_signal(env, puc);
    }
#endif
}
9719fa3e853Sbellard #endif
972fd6ce8f6Sbellard 
/* add the tb in the target page and protect it if necessary */
static inline void tb_alloc_page(TranslationBlock *tb,
                                 unsigned int n, target_ulong page_addr)
{
    PageDesc *p;
    TranslationBlock *last_first_tb;

    tb->page_addr[n] = page_addr;
    p = page_find_alloc(page_addr >> TARGET_PAGE_BITS);
    /* push the TB onto the page's list; the low bits of the stored
       pointer encode which of the TB's two pages (n) this entry is */
    tb->page_next[n] = p->first_tb;
    last_first_tb = p->first_tb;
    p->first_tb = (TranslationBlock *)((long)tb | n);
    invalidate_page_bitmap(p);

#if defined(TARGET_HAS_SMC) || 1

#if defined(CONFIG_USER_ONLY)
    if (p->flags & PAGE_WRITE) {
        target_ulong addr;
        PageDesc *p2;
        int prot;

        /* force the host page as non writable (writes will have a
           page fault + mprotect overhead) */
        page_addr &= qemu_host_page_mask;
        prot = 0;
        /* accumulate flags of every target page inside the host page
           and strip PAGE_WRITE from each of them */
        for(addr = page_addr; addr < page_addr + qemu_host_page_size;
            addr += TARGET_PAGE_SIZE) {

            p2 = page_find (addr >> TARGET_PAGE_BITS);
            if (!p2)
                continue;
            prot |= p2->flags;
            p2->flags &= ~PAGE_WRITE;
            page_get_flags(addr);
          }
        mprotect(g2h(page_addr), qemu_host_page_size,
                 (prot & PAGE_BITS) & ~PAGE_WRITE);
#ifdef DEBUG_TB_INVALIDATE
        printf("protecting code page: 0x" TARGET_FMT_lx "\n",
               page_addr);
#endif
    }
#else
    /* if some code is already present, then the pages are already
       protected. So we handle the case where only the first TB is
       allocated in a physical page */
    if (!last_first_tb) {
        tlb_protect_code(page_addr);
    }
#endif

#endif /* TARGET_HAS_SMC */
}
1027fd6ce8f6Sbellard 
1028fd6ce8f6Sbellard /* Allocate a new translation block. Flush the translation buffer if
1029fd6ce8f6Sbellard    too many translation blocks or too much generated code. */
1030c27004ecSbellard TranslationBlock *tb_alloc(target_ulong pc)
1031fd6ce8f6Sbellard {
1032fd6ce8f6Sbellard     TranslationBlock *tb;
1033fd6ce8f6Sbellard 
103426a5f13bSbellard     if (nb_tbs >= code_gen_max_blocks ||
103526a5f13bSbellard         (code_gen_ptr - code_gen_buffer) >= code_gen_buffer_max_size)
1036d4e8164fSbellard         return NULL;
1037fd6ce8f6Sbellard     tb = &tbs[nb_tbs++];
1038fd6ce8f6Sbellard     tb->pc = pc;
1039b448f2f3Sbellard     tb->cflags = 0;
1040d4e8164fSbellard     return tb;
1041d4e8164fSbellard }
1042d4e8164fSbellard 
/* add a new TB and link it to the physical page tables. phys_page2 is
   (-1) to indicate that only one page contains the TB. */
void tb_link_phys(TranslationBlock *tb,
                  target_ulong phys_pc, target_ulong phys_page2)
{
    unsigned int h;
    TranslationBlock **ptb;

    /* add in the physical hash table */
    h = tb_phys_hash_func(phys_pc);
    ptb = &tb_phys_hash[h];
    tb->phys_hash_next = *ptb;
    *ptb = tb;

    /* add in the page list */
    tb_alloc_page(tb, 0, phys_pc & TARGET_PAGE_MASK);
    if (phys_page2 != -1)
        tb_alloc_page(tb, 1, phys_page2);
    else
        tb->page_addr[1] = -1;

    /* start the circular list of TBs jumping to this one; the low-bit
       tag 2 marks the list head */
    tb->jmp_first = (TranslationBlock *)((long)tb | 2);
    tb->jmp_next[0] = NULL;
    tb->jmp_next[1] = NULL;

    /* init original jump addresses */
    if (tb->tb_next_offset[0] != 0xffff)
        tb_reset_jump(tb, 0);
    if (tb->tb_next_offset[1] != 0xffff)
        tb_reset_jump(tb, 1);

#ifdef DEBUG_TB_CHECK
    tb_page_check();
#endif
}
1078fd6ce8f6Sbellard 
1079a513fe19Sbellard /* find the TB 'tb' such that tb[0].tc_ptr <= tc_ptr <
1080a513fe19Sbellard    tb[1].tc_ptr. Return NULL if not found */
1081a513fe19Sbellard TranslationBlock *tb_find_pc(unsigned long tc_ptr)
1082a513fe19Sbellard {
1083a513fe19Sbellard     int m_min, m_max, m;
1084a513fe19Sbellard     unsigned long v;
1085a513fe19Sbellard     TranslationBlock *tb;
1086a513fe19Sbellard 
1087a513fe19Sbellard     if (nb_tbs <= 0)
1088a513fe19Sbellard         return NULL;
1089a513fe19Sbellard     if (tc_ptr < (unsigned long)code_gen_buffer ||
1090a513fe19Sbellard         tc_ptr >= (unsigned long)code_gen_ptr)
1091a513fe19Sbellard         return NULL;
1092a513fe19Sbellard     /* binary search (cf Knuth) */
1093a513fe19Sbellard     m_min = 0;
1094a513fe19Sbellard     m_max = nb_tbs - 1;
1095a513fe19Sbellard     while (m_min <= m_max) {
1096a513fe19Sbellard         m = (m_min + m_max) >> 1;
1097a513fe19Sbellard         tb = &tbs[m];
1098a513fe19Sbellard         v = (unsigned long)tb->tc_ptr;
1099a513fe19Sbellard         if (v == tc_ptr)
1100a513fe19Sbellard             return tb;
1101a513fe19Sbellard         else if (tc_ptr < v) {
1102a513fe19Sbellard             m_max = m - 1;
1103a513fe19Sbellard         } else {
1104a513fe19Sbellard             m_min = m + 1;
1105a513fe19Sbellard         }
1106a513fe19Sbellard     }
1107a513fe19Sbellard     return &tbs[m_max];
1108a513fe19Sbellard }
11097501267eSbellard 
1110ea041c0eSbellard static void tb_reset_jump_recursive(TranslationBlock *tb);
1111ea041c0eSbellard 
/* Unlink jump 'n' of 'tb' from the target TB's incoming-jump list,
   reset the generated-code jump, then recurse into the target so its
   own outgoing jumps are reset too.  The lists use tagged pointers:
   the low 2 bits carry the slot index, tag 2 marks the list head. */
static inline void tb_reset_jump_recursive2(TranslationBlock *tb, int n)
{
    TranslationBlock *tb1, *tb_next, **ptb;
    unsigned int n1;

    tb1 = tb->jmp_next[n];
    if (tb1 != NULL) {
        /* find head of list */
        for(;;) {
            n1 = (long)tb1 & 3;
            tb1 = (TranslationBlock *)((long)tb1 & ~3);
            if (n1 == 2)
                break;
            tb1 = tb1->jmp_next[n1];
        }
        /* we are now sure now that tb jumps to tb1 */
        tb_next = tb1;

        /* remove tb from the jmp_first list */
        ptb = &tb_next->jmp_first;
        for(;;) {
            tb1 = *ptb;
            n1 = (long)tb1 & 3;
            tb1 = (TranslationBlock *)((long)tb1 & ~3);
            if (n1 == n && tb1 == tb)
                break;
            ptb = &tb1->jmp_next[n1];
        }
        *ptb = tb->jmp_next[n];
        tb->jmp_next[n] = NULL;

        /* suppress the jump to next tb in generated code */
        tb_reset_jump(tb, n);

        /* suppress jumps in the tb on which we could have jumped */
        tb_reset_jump_recursive(tb_next);
    }
}
1150ea041c0eSbellard 
1151ea041c0eSbellard static void tb_reset_jump_recursive(TranslationBlock *tb)
1152ea041c0eSbellard {
1153ea041c0eSbellard     tb_reset_jump_recursive2(tb, 0);
1154ea041c0eSbellard     tb_reset_jump_recursive2(tb, 1);
1155ea041c0eSbellard }
1156ea041c0eSbellard 
11571fddef4bSbellard #if defined(TARGET_HAS_ICE)
/* Invalidate any translated code containing the target 'pc' so the
   breakpoint takes effect on the next execution. */
static void breakpoint_invalidate(CPUState *env, target_ulong pc)
{
    target_phys_addr_t addr;
    target_ulong pd;
    ram_addr_t ram_addr;
    PhysPageDesc *p;

    addr = cpu_get_phys_page_debug(env, pc);
    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    if (!p) {
        pd = IO_MEM_UNASSIGNED;
    } else {
        pd = p->phys_offset;
    }
    /* form the ram address of 'pc' and invalidate the single byte */
    ram_addr = (pd & TARGET_PAGE_MASK) | (pc & ~TARGET_PAGE_MASK);
    tb_invalidate_phys_page_range(ram_addr, ram_addr + 1, 0);
}
1175c27004ecSbellard #endif
1176d720b93dSbellard 
11776658ffb8Spbrook /* Add a watchpoint.  */
11786658ffb8Spbrook int  cpu_watchpoint_insert(CPUState *env, target_ulong addr)
11796658ffb8Spbrook {
11806658ffb8Spbrook     int i;
11816658ffb8Spbrook 
11826658ffb8Spbrook     for (i = 0; i < env->nb_watchpoints; i++) {
11836658ffb8Spbrook         if (addr == env->watchpoint[i].vaddr)
11846658ffb8Spbrook             return 0;
11856658ffb8Spbrook     }
11866658ffb8Spbrook     if (env->nb_watchpoints >= MAX_WATCHPOINTS)
11876658ffb8Spbrook         return -1;
11886658ffb8Spbrook 
11896658ffb8Spbrook     i = env->nb_watchpoints++;
11906658ffb8Spbrook     env->watchpoint[i].vaddr = addr;
11916658ffb8Spbrook     tlb_flush_page(env, addr);
11926658ffb8Spbrook     /* FIXME: This flush is needed because of the hack to make memory ops
11936658ffb8Spbrook        terminate the TB.  It can be removed once the proper IO trap and
11946658ffb8Spbrook        re-execute bits are in.  */
11956658ffb8Spbrook     tb_flush(env);
11966658ffb8Spbrook     return i;
11976658ffb8Spbrook }
11986658ffb8Spbrook 
11996658ffb8Spbrook /* Remove a watchpoint.  */
12006658ffb8Spbrook int cpu_watchpoint_remove(CPUState *env, target_ulong addr)
12016658ffb8Spbrook {
12026658ffb8Spbrook     int i;
12036658ffb8Spbrook 
12046658ffb8Spbrook     for (i = 0; i < env->nb_watchpoints; i++) {
12056658ffb8Spbrook         if (addr == env->watchpoint[i].vaddr) {
12066658ffb8Spbrook             env->nb_watchpoints--;
12076658ffb8Spbrook             env->watchpoint[i] = env->watchpoint[env->nb_watchpoints];
12086658ffb8Spbrook             tlb_flush_page(env, addr);
12096658ffb8Spbrook             return 0;
12106658ffb8Spbrook         }
12116658ffb8Spbrook     }
12126658ffb8Spbrook     return -1;
12136658ffb8Spbrook }
12146658ffb8Spbrook 
12157d03f82fSedgar_igl /* Remove all watchpoints. */
12167d03f82fSedgar_igl void cpu_watchpoint_remove_all(CPUState *env) {
12177d03f82fSedgar_igl     int i;
12187d03f82fSedgar_igl 
12197d03f82fSedgar_igl     for (i = 0; i < env->nb_watchpoints; i++) {
12207d03f82fSedgar_igl         tlb_flush_page(env, env->watchpoint[i].vaddr);
12217d03f82fSedgar_igl     }
12227d03f82fSedgar_igl     env->nb_watchpoints = 0;
12237d03f82fSedgar_igl }
12247d03f82fSedgar_igl 
1225c33a346eSbellard /* add a breakpoint. EXCP_DEBUG is returned by the CPU loop if a
1226c33a346eSbellard    breakpoint is reached */
12272e12669aSbellard int cpu_breakpoint_insert(CPUState *env, target_ulong pc)
12284c3a88a2Sbellard {
12291fddef4bSbellard #if defined(TARGET_HAS_ICE)
12304c3a88a2Sbellard     int i;
12314c3a88a2Sbellard 
12324c3a88a2Sbellard     for(i = 0; i < env->nb_breakpoints; i++) {
12334c3a88a2Sbellard         if (env->breakpoints[i] == pc)
12344c3a88a2Sbellard             return 0;
12354c3a88a2Sbellard     }
12364c3a88a2Sbellard 
12374c3a88a2Sbellard     if (env->nb_breakpoints >= MAX_BREAKPOINTS)
12384c3a88a2Sbellard         return -1;
12394c3a88a2Sbellard     env->breakpoints[env->nb_breakpoints++] = pc;
1240d720b93dSbellard 
1241d720b93dSbellard     breakpoint_invalidate(env, pc);
12424c3a88a2Sbellard     return 0;
12434c3a88a2Sbellard #else
12444c3a88a2Sbellard     return -1;
12454c3a88a2Sbellard #endif
12464c3a88a2Sbellard }
12474c3a88a2Sbellard 
12487d03f82fSedgar_igl /* remove all breakpoints */
12497d03f82fSedgar_igl void cpu_breakpoint_remove_all(CPUState *env) {
12507d03f82fSedgar_igl #if defined(TARGET_HAS_ICE)
12517d03f82fSedgar_igl     int i;
12527d03f82fSedgar_igl     for(i = 0; i < env->nb_breakpoints; i++) {
12537d03f82fSedgar_igl         breakpoint_invalidate(env, env->breakpoints[i]);
12547d03f82fSedgar_igl     }
12557d03f82fSedgar_igl     env->nb_breakpoints = 0;
12567d03f82fSedgar_igl #endif
12577d03f82fSedgar_igl }
12587d03f82fSedgar_igl 
12594c3a88a2Sbellard /* remove a breakpoint */
12602e12669aSbellard int cpu_breakpoint_remove(CPUState *env, target_ulong pc)
12614c3a88a2Sbellard {
12621fddef4bSbellard #if defined(TARGET_HAS_ICE)
12634c3a88a2Sbellard     int i;
12644c3a88a2Sbellard     for(i = 0; i < env->nb_breakpoints; i++) {
12654c3a88a2Sbellard         if (env->breakpoints[i] == pc)
12664c3a88a2Sbellard             goto found;
12674c3a88a2Sbellard     }
12684c3a88a2Sbellard     return -1;
12694c3a88a2Sbellard  found:
12704c3a88a2Sbellard     env->nb_breakpoints--;
12711fddef4bSbellard     if (i < env->nb_breakpoints)
12721fddef4bSbellard       env->breakpoints[i] = env->breakpoints[env->nb_breakpoints];
1273d720b93dSbellard 
1274d720b93dSbellard     breakpoint_invalidate(env, pc);
12754c3a88a2Sbellard     return 0;
12764c3a88a2Sbellard #else
12774c3a88a2Sbellard     return -1;
12784c3a88a2Sbellard #endif
12794c3a88a2Sbellard }
12804c3a88a2Sbellard 
1281c33a346eSbellard /* enable or disable single step mode. EXCP_DEBUG is returned by the
1282c33a346eSbellard    CPU loop after each instruction */
1283c33a346eSbellard void cpu_single_step(CPUState *env, int enabled)
1284c33a346eSbellard {
12851fddef4bSbellard #if defined(TARGET_HAS_ICE)
1286c33a346eSbellard     if (env->singlestep_enabled != enabled) {
1287c33a346eSbellard         env->singlestep_enabled = enabled;
1288c33a346eSbellard         /* must flush all the translated code to avoid inconsistancies */
12899fa3e853Sbellard         /* XXX: only flush what is necessary */
12900124311eSbellard         tb_flush(env);
1291c33a346eSbellard     }
1292c33a346eSbellard #endif
1293c33a346eSbellard }
1294c33a346eSbellard 
/* enable or disable low levels log */
void cpu_set_log(int log_flags)
{
    loglevel = log_flags;
    if (loglevel && !logfile) {
        /* first enable: open the configured log file (append mode once
           it has been opened before, see log_append below) */
        logfile = fopen(logfilename, log_append ? "a" : "w");
        if (!logfile) {
            perror(logfilename);
            _exit(1);
        }
#if !defined(CONFIG_SOFTMMU)
        /* must avoid mmap() usage of glibc by setting a buffer "by hand" */
        {
            static uint8_t logfile_buf[4096];
            setvbuf(logfile, logfile_buf, _IOLBF, sizeof(logfile_buf));
        }
#else
        setvbuf(logfile, NULL, _IOLBF, 0);
#endif
        /* any later reopen appends instead of truncating */
        log_append = 1;
    }
    if (!loglevel && logfile) {
        fclose(logfile);
        logfile = NULL;
    }
}
132134865134Sbellard 
132234865134Sbellard void cpu_set_log_filename(const char *filename)
132334865134Sbellard {
132434865134Sbellard     logfilename = strdup(filename);
1325e735b91cSpbrook     if (logfile) {
1326e735b91cSpbrook         fclose(logfile);
1327e735b91cSpbrook         logfile = NULL;
1328e735b91cSpbrook     }
1329e735b91cSpbrook     cpu_set_log(loglevel);
133034865134Sbellard }
1331c33a346eSbellard 
/* mask must never be zero, except for A20 change call */
/* Post interrupt bits to the CPU and, if it is running translated
   code, break the chained-TB links so the CPU loop notices soon. */
void cpu_interrupt(CPUState *env, int mask)
{
    TranslationBlock *tb;
    /* protects the jump-unchaining below against concurrent callers */
    static spinlock_t interrupt_lock = SPIN_LOCK_UNLOCKED;

    env->interrupt_request |= mask;
    /* if the cpu is currently executing code, we must unlink it and
       all the potentially executing TB */
    tb = env->current_tb;
    /* testandset is non-blocking: if another caller already holds the
       lock, it is already unchaining, so we can skip the work. */
    if (tb && !testandset(&interrupt_lock)) {
        env->current_tb = NULL;
        tb_reset_jump_recursive(tb);
        resetlock(&interrupt_lock);
    }
}
1348ea041c0eSbellard 
1349b54ad049Sbellard void cpu_reset_interrupt(CPUState *env, int mask)
1350b54ad049Sbellard {
1351b54ad049Sbellard     env->interrupt_request &= ~mask;
1352b54ad049Sbellard }
1353b54ad049Sbellard 
/* Table mapping each log mask bit to its command-line name and help
   text.  Terminated by a zero-mask sentinel entry (see the lookup
   loops in cpu_str_to_log_mask). */
CPULogItem cpu_log_items[] = {
    { CPU_LOG_TB_OUT_ASM, "out_asm",
      "show generated host assembly code for each compiled TB" },
    { CPU_LOG_TB_IN_ASM, "in_asm",
      "show target assembly code for each compiled TB" },
    { CPU_LOG_TB_OP, "op",
      "show micro ops for each compiled TB" },
    { CPU_LOG_TB_OP_OPT, "op_opt",
      /* help string assembled at compile time; i386 has an extra
         eflags-optimization pass worth mentioning */
      "show micro ops "
#ifdef TARGET_I386
      "before eflags optimization and "
#endif
      "after liveness analysis" },
    { CPU_LOG_INT, "int",
      "show interrupts/exceptions in short format" },
    { CPU_LOG_EXEC, "exec",
      "show trace before each executed TB (lots of logs)" },
    { CPU_LOG_TB_CPU, "cpu",
      "show CPU state before block translation" },
#ifdef TARGET_I386
    { CPU_LOG_PCALL, "pcall",
      "show protected mode far calls/returns/exceptions" },
#endif
#ifdef DEBUG_IOPORT
    { CPU_LOG_IOPORT, "ioport",
      "show all i/o ports accesses" },
#endif
    { 0, NULL, NULL },
};
1383f193c797Sbellard 
/* Return nonzero iff the first 'n' characters of s1 match the
   NUL-terminated string s2 exactly (same length and same bytes). */
static int cmp1(const char *s1, int n, const char *s2)
{
    return strlen(s2) == n && memcmp(s1, s2, n) == 0;
}
1390f193c797Sbellard 
1391f193c797Sbellard /* takes a comma separated list of log masks. Return 0 if error. */
1392f193c797Sbellard int cpu_str_to_log_mask(const char *str)
1393f193c797Sbellard {
1394f193c797Sbellard     CPULogItem *item;
1395f193c797Sbellard     int mask;
1396f193c797Sbellard     const char *p, *p1;
1397f193c797Sbellard 
1398f193c797Sbellard     p = str;
1399f193c797Sbellard     mask = 0;
1400f193c797Sbellard     for(;;) {
1401f193c797Sbellard         p1 = strchr(p, ',');
1402f193c797Sbellard         if (!p1)
1403f193c797Sbellard             p1 = p + strlen(p);
14048e3a9fd2Sbellard 	if(cmp1(p,p1-p,"all")) {
14058e3a9fd2Sbellard 		for(item = cpu_log_items; item->mask != 0; item++) {
14068e3a9fd2Sbellard 			mask |= item->mask;
14078e3a9fd2Sbellard 		}
14088e3a9fd2Sbellard 	} else {
1409f193c797Sbellard         for(item = cpu_log_items; item->mask != 0; item++) {
1410f193c797Sbellard             if (cmp1(p, p1 - p, item->name))
1411f193c797Sbellard                 goto found;
1412f193c797Sbellard         }
1413f193c797Sbellard         return 0;
14148e3a9fd2Sbellard 	}
1415f193c797Sbellard     found:
1416f193c797Sbellard         mask |= item->mask;
1417f193c797Sbellard         if (*p1 != ',')
1418f193c797Sbellard             break;
1419f193c797Sbellard         p = p1 + 1;
1420f193c797Sbellard     }
1421f193c797Sbellard     return mask;
1422f193c797Sbellard }
1423ea041c0eSbellard 
/* Print a fatal printf-style message plus the full CPU state to
   stderr and to the log file (if open), then abort().  Never returns. */
void cpu_abort(CPUState *env, const char *fmt, ...)
{
    va_list ap;
    va_list ap2;   /* second copy: 'ap' is consumed by the stderr pass */

    va_start(ap, fmt);
    va_copy(ap2, ap);
    fprintf(stderr, "qemu: fatal: ");
    vfprintf(stderr, fmt, ap);
    fprintf(stderr, "\n");
#ifdef TARGET_I386
    /* on x86 also dump FPU state and condition-code operands */
    cpu_dump_state(env, stderr, fprintf, X86_DUMP_FPU | X86_DUMP_CCOP);
#else
    cpu_dump_state(env, stderr, fprintf, 0);
#endif
    if (logfile) {
        fprintf(logfile, "qemu: fatal: ");
        vfprintf(logfile, fmt, ap2);
        fprintf(logfile, "\n");
#ifdef TARGET_I386
        cpu_dump_state(env, logfile, fprintf, X86_DUMP_FPU | X86_DUMP_CCOP);
#else
        cpu_dump_state(env, logfile, fprintf, 0);
#endif
        /* make sure everything reaches disk before we die */
        fflush(logfile);
        fclose(logfile);
    }
    va_end(ap2);
    va_end(ap);
    abort();
}
14557501267eSbellard 
1456c5be9f08Sths CPUState *cpu_copy(CPUState *env)
1457c5be9f08Sths {
145801ba9816Sths     CPUState *new_env = cpu_init(env->cpu_model_str);
1459c5be9f08Sths     /* preserve chaining and index */
1460c5be9f08Sths     CPUState *next_cpu = new_env->next_cpu;
1461c5be9f08Sths     int cpu_index = new_env->cpu_index;
1462c5be9f08Sths     memcpy(new_env, env, sizeof(CPUState));
1463c5be9f08Sths     new_env->next_cpu = next_cpu;
1464c5be9f08Sths     new_env->cpu_index = cpu_index;
1465c5be9f08Sths     return new_env;
1466c5be9f08Sths }
1467c5be9f08Sths 
14680124311eSbellard #if !defined(CONFIG_USER_ONLY)
14690124311eSbellard 
14705c751e99Sedgar_igl static inline void tlb_flush_jmp_cache(CPUState *env, target_ulong addr)
14715c751e99Sedgar_igl {
14725c751e99Sedgar_igl     unsigned int i;
14735c751e99Sedgar_igl 
14745c751e99Sedgar_igl     /* Discard jump cache entries for any tb which might potentially
14755c751e99Sedgar_igl        overlap the flushed page.  */
14765c751e99Sedgar_igl     i = tb_jmp_cache_hash_page(addr - TARGET_PAGE_SIZE);
14775c751e99Sedgar_igl     memset (&env->tb_jmp_cache[i], 0,
14785c751e99Sedgar_igl 	    TB_JMP_PAGE_SIZE * sizeof(TranslationBlock *));
14795c751e99Sedgar_igl 
14805c751e99Sedgar_igl     i = tb_jmp_cache_hash_page(addr);
14815c751e99Sedgar_igl     memset (&env->tb_jmp_cache[i], 0,
14825c751e99Sedgar_igl 	    TB_JMP_PAGE_SIZE * sizeof(TranslationBlock *));
14835c751e99Sedgar_igl }
14845c751e99Sedgar_igl 
1485ee8b7021Sbellard /* NOTE: if flush_global is true, also flush global entries (not
1486ee8b7021Sbellard    implemented yet) */
1487ee8b7021Sbellard void tlb_flush(CPUState *env, int flush_global)
148833417e70Sbellard {
148933417e70Sbellard     int i;
14900124311eSbellard 
14919fa3e853Sbellard #if defined(DEBUG_TLB)
14929fa3e853Sbellard     printf("tlb_flush:\n");
14939fa3e853Sbellard #endif
14940124311eSbellard     /* must reset current TB so that interrupts cannot modify the
14950124311eSbellard        links while we are modifying them */
14960124311eSbellard     env->current_tb = NULL;
14970124311eSbellard 
149833417e70Sbellard     for(i = 0; i < CPU_TLB_SIZE; i++) {
149984b7b8e7Sbellard         env->tlb_table[0][i].addr_read = -1;
150084b7b8e7Sbellard         env->tlb_table[0][i].addr_write = -1;
150184b7b8e7Sbellard         env->tlb_table[0][i].addr_code = -1;
150284b7b8e7Sbellard         env->tlb_table[1][i].addr_read = -1;
150384b7b8e7Sbellard         env->tlb_table[1][i].addr_write = -1;
150484b7b8e7Sbellard         env->tlb_table[1][i].addr_code = -1;
15056fa4cea9Sj_mayer #if (NB_MMU_MODES >= 3)
15066fa4cea9Sj_mayer         env->tlb_table[2][i].addr_read = -1;
15076fa4cea9Sj_mayer         env->tlb_table[2][i].addr_write = -1;
15086fa4cea9Sj_mayer         env->tlb_table[2][i].addr_code = -1;
15096fa4cea9Sj_mayer #if (NB_MMU_MODES == 4)
15106fa4cea9Sj_mayer         env->tlb_table[3][i].addr_read = -1;
15116fa4cea9Sj_mayer         env->tlb_table[3][i].addr_write = -1;
15126fa4cea9Sj_mayer         env->tlb_table[3][i].addr_code = -1;
15136fa4cea9Sj_mayer #endif
15146fa4cea9Sj_mayer #endif
151533417e70Sbellard     }
15169fa3e853Sbellard 
15178a40a180Sbellard     memset (env->tb_jmp_cache, 0, TB_JMP_CACHE_SIZE * sizeof (void *));
15189fa3e853Sbellard 
15199fa3e853Sbellard #if !defined(CONFIG_SOFTMMU)
15209fa3e853Sbellard     munmap((void *)MMAP_AREA_START, MMAP_AREA_END - MMAP_AREA_START);
15219fa3e853Sbellard #endif
15220a962c02Sbellard #ifdef USE_KQEMU
15230a962c02Sbellard     if (env->kqemu_enabled) {
15240a962c02Sbellard         kqemu_flush(env, flush_global);
15250a962c02Sbellard     }
15260a962c02Sbellard #endif
1527e3db7226Sbellard     tlb_flush_count++;
152833417e70Sbellard }
152933417e70Sbellard 
1530274da6b2Sbellard static inline void tlb_flush_entry(CPUTLBEntry *tlb_entry, target_ulong addr)
153161382a50Sbellard {
153284b7b8e7Sbellard     if (addr == (tlb_entry->addr_read &
153384b7b8e7Sbellard                  (TARGET_PAGE_MASK | TLB_INVALID_MASK)) ||
153484b7b8e7Sbellard         addr == (tlb_entry->addr_write &
153584b7b8e7Sbellard                  (TARGET_PAGE_MASK | TLB_INVALID_MASK)) ||
153684b7b8e7Sbellard         addr == (tlb_entry->addr_code &
153784b7b8e7Sbellard                  (TARGET_PAGE_MASK | TLB_INVALID_MASK))) {
153884b7b8e7Sbellard         tlb_entry->addr_read = -1;
153984b7b8e7Sbellard         tlb_entry->addr_write = -1;
154084b7b8e7Sbellard         tlb_entry->addr_code = -1;
154184b7b8e7Sbellard     }
154261382a50Sbellard }
154361382a50Sbellard 
15442e12669aSbellard void tlb_flush_page(CPUState *env, target_ulong addr)
154533417e70Sbellard {
15468a40a180Sbellard     int i;
15470124311eSbellard 
15489fa3e853Sbellard #if defined(DEBUG_TLB)
1549108c49b8Sbellard     printf("tlb_flush_page: " TARGET_FMT_lx "\n", addr);
15509fa3e853Sbellard #endif
15510124311eSbellard     /* must reset current TB so that interrupts cannot modify the
15520124311eSbellard        links while we are modifying them */
15530124311eSbellard     env->current_tb = NULL;
155433417e70Sbellard 
155561382a50Sbellard     addr &= TARGET_PAGE_MASK;
155633417e70Sbellard     i = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
155784b7b8e7Sbellard     tlb_flush_entry(&env->tlb_table[0][i], addr);
155884b7b8e7Sbellard     tlb_flush_entry(&env->tlb_table[1][i], addr);
15596fa4cea9Sj_mayer #if (NB_MMU_MODES >= 3)
15606fa4cea9Sj_mayer     tlb_flush_entry(&env->tlb_table[2][i], addr);
15616fa4cea9Sj_mayer #if (NB_MMU_MODES == 4)
15626fa4cea9Sj_mayer     tlb_flush_entry(&env->tlb_table[3][i], addr);
15636fa4cea9Sj_mayer #endif
15646fa4cea9Sj_mayer #endif
15650124311eSbellard 
15665c751e99Sedgar_igl     tlb_flush_jmp_cache(env, addr);
156761382a50Sbellard 
15689fa3e853Sbellard #if !defined(CONFIG_SOFTMMU)
15699fa3e853Sbellard     if (addr < MMAP_AREA_END)
15709fa3e853Sbellard         munmap((void *)addr, TARGET_PAGE_SIZE);
15719fa3e853Sbellard #endif
15720a962c02Sbellard #ifdef USE_KQEMU
15730a962c02Sbellard     if (env->kqemu_enabled) {
15740a962c02Sbellard         kqemu_flush_page(env, addr);
15750a962c02Sbellard     }
15760a962c02Sbellard #endif
15779fa3e853Sbellard }
15789fa3e853Sbellard 
/* update the TLBs so that writes to code in the virtual page 'addr'
   can be detected */
static void tlb_protect_code(ram_addr_t ram_addr)
{
    /* clearing CODE_DIRTY_FLAG forces writes to this page through the
       slow path, where self-modifying code can be detected */
    cpu_physical_memory_reset_dirty(ram_addr,
                                    ram_addr + TARGET_PAGE_SIZE,
                                    CODE_DIRTY_FLAG);
}
15879fa3e853Sbellard 
/* update the TLB so that writes in physical page 'phys_addr' are no longer
   tested for self modifying code */
static void tlb_unprotect_code_phys(CPUState *env, ram_addr_t ram_addr,
                                    target_ulong vaddr)
{
    /* setting CODE_DIRTY_FLAG lets writes proceed without the
       self-modifying-code check; 'env' and 'vaddr' are unused here */
    phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] |= CODE_DIRTY_FLAG;
}
15959fa3e853Sbellard 
15961ccde1cbSbellard static inline void tlb_reset_dirty_range(CPUTLBEntry *tlb_entry,
15971ccde1cbSbellard                                          unsigned long start, unsigned long length)
15981ccde1cbSbellard {
15991ccde1cbSbellard     unsigned long addr;
160084b7b8e7Sbellard     if ((tlb_entry->addr_write & ~TARGET_PAGE_MASK) == IO_MEM_RAM) {
160184b7b8e7Sbellard         addr = (tlb_entry->addr_write & TARGET_PAGE_MASK) + tlb_entry->addend;
16021ccde1cbSbellard         if ((addr - start) < length) {
160384b7b8e7Sbellard             tlb_entry->addr_write = (tlb_entry->addr_write & TARGET_PAGE_MASK) | IO_MEM_NOTDIRTY;
16041ccde1cbSbellard         }
16051ccde1cbSbellard     }
16061ccde1cbSbellard }
16071ccde1cbSbellard 
/* Clear the given dirty flag bits for the physical address range
   [start, end) and re-arm dirty tracking: TLB write entries covering
   the range are redirected through the notdirty handler, and (in the
   non-SOFTMMU case) the host mappings are write-protected. */
void cpu_physical_memory_reset_dirty(ram_addr_t start, ram_addr_t end,
                                     int dirty_flags)
{
    CPUState *env;
    unsigned long length, start1;
    int i, mask, len;
    uint8_t *p;

    /* work on whole pages only */
    start &= TARGET_PAGE_MASK;
    end = TARGET_PAGE_ALIGN(end);

    length = end - start;
    if (length == 0)
        return;
    len = length >> TARGET_PAGE_BITS;
#ifdef USE_KQEMU
    /* XXX: should not depend on cpu context */
    env = first_cpu;
    if (env->kqemu_enabled) {
        ram_addr_t addr;
        addr = start;
        for(i = 0; i < len; i++) {
            kqemu_set_notdirty(env, addr);
            addr += TARGET_PAGE_SIZE;
        }
    }
#endif
    /* clear the requested flag bits in the per-page dirty byte array */
    mask = ~dirty_flags;
    p = phys_ram_dirty + (start >> TARGET_PAGE_BITS);
    for(i = 0; i < len; i++)
        p[i] &= mask;

    /* we modify the TLB cache so that the dirty bit will be set again
       when accessing the range */
    /* start1 is the host virtual address corresponding to 'start' */
    start1 = start + (unsigned long)phys_ram_base;
    for(env = first_cpu; env != NULL; env = env->next_cpu) {
        for(i = 0; i < CPU_TLB_SIZE; i++)
            tlb_reset_dirty_range(&env->tlb_table[0][i], start1, length);
        for(i = 0; i < CPU_TLB_SIZE; i++)
            tlb_reset_dirty_range(&env->tlb_table[1][i], start1, length);
#if (NB_MMU_MODES >= 3)
        for(i = 0; i < CPU_TLB_SIZE; i++)
            tlb_reset_dirty_range(&env->tlb_table[2][i], start1, length);
#if (NB_MMU_MODES == 4)
        for(i = 0; i < CPU_TLB_SIZE; i++)
            tlb_reset_dirty_range(&env->tlb_table[3][i], start1, length);
#endif
#endif
    }

#if !defined(CONFIG_SOFTMMU)
    /* XXX: this is expensive */
    {
        /* NOTE: this 'p' shadows the outer uint8_t *p above */
        VirtPageDesc *p;
        int j;
        target_ulong addr;

        /* write-protect every writable host mapping whose physical
           page falls inside [start, end) */
        for(i = 0; i < L1_SIZE; i++) {
            p = l1_virt_map[i];
            if (p) {
                addr = i << (TARGET_PAGE_BITS + L2_BITS);
                for(j = 0; j < L2_SIZE; j++) {
                    if (p->valid_tag == virt_valid_tag &&
                        p->phys_addr >= start && p->phys_addr < end &&
                        (p->prot & PROT_WRITE)) {
                        if (addr < MMAP_AREA_END) {
                            mprotect((void *)addr, TARGET_PAGE_SIZE,
                                     p->prot & ~PROT_WRITE);
                        }
                    }
                    addr += TARGET_PAGE_SIZE;
                    p++;
                }
            }
        }
    }
#endif
}
16861ccde1cbSbellard 
16873a7d929eSbellard static inline void tlb_update_dirty(CPUTLBEntry *tlb_entry)
16883a7d929eSbellard {
16893a7d929eSbellard     ram_addr_t ram_addr;
16903a7d929eSbellard 
169184b7b8e7Sbellard     if ((tlb_entry->addr_write & ~TARGET_PAGE_MASK) == IO_MEM_RAM) {
169284b7b8e7Sbellard         ram_addr = (tlb_entry->addr_write & TARGET_PAGE_MASK) +
16933a7d929eSbellard             tlb_entry->addend - (unsigned long)phys_ram_base;
16943a7d929eSbellard         if (!cpu_physical_memory_is_dirty(ram_addr)) {
169584b7b8e7Sbellard             tlb_entry->addr_write |= IO_MEM_NOTDIRTY;
16963a7d929eSbellard         }
16973a7d929eSbellard     }
16983a7d929eSbellard }
16993a7d929eSbellard 
17003a7d929eSbellard /* update the TLB according to the current state of the dirty bits */
17013a7d929eSbellard void cpu_tlb_update_dirty(CPUState *env)
17023a7d929eSbellard {
17033a7d929eSbellard     int i;
17043a7d929eSbellard     for(i = 0; i < CPU_TLB_SIZE; i++)
170584b7b8e7Sbellard         tlb_update_dirty(&env->tlb_table[0][i]);
17063a7d929eSbellard     for(i = 0; i < CPU_TLB_SIZE; i++)
170784b7b8e7Sbellard         tlb_update_dirty(&env->tlb_table[1][i]);
17086fa4cea9Sj_mayer #if (NB_MMU_MODES >= 3)
17096fa4cea9Sj_mayer     for(i = 0; i < CPU_TLB_SIZE; i++)
17106fa4cea9Sj_mayer         tlb_update_dirty(&env->tlb_table[2][i]);
17116fa4cea9Sj_mayer #if (NB_MMU_MODES == 4)
17126fa4cea9Sj_mayer     for(i = 0; i < CPU_TLB_SIZE; i++)
17136fa4cea9Sj_mayer         tlb_update_dirty(&env->tlb_table[3][i]);
17146fa4cea9Sj_mayer #endif
17156fa4cea9Sj_mayer #endif
17163a7d929eSbellard }
17173a7d929eSbellard 
17181ccde1cbSbellard static inline void tlb_set_dirty1(CPUTLBEntry *tlb_entry,
17191ccde1cbSbellard                                   unsigned long start)
17201ccde1cbSbellard {
17211ccde1cbSbellard     unsigned long addr;
172284b7b8e7Sbellard     if ((tlb_entry->addr_write & ~TARGET_PAGE_MASK) == IO_MEM_NOTDIRTY) {
172384b7b8e7Sbellard         addr = (tlb_entry->addr_write & TARGET_PAGE_MASK) + tlb_entry->addend;
17241ccde1cbSbellard         if (addr == start) {
172584b7b8e7Sbellard             tlb_entry->addr_write = (tlb_entry->addr_write & TARGET_PAGE_MASK) | IO_MEM_RAM;
17261ccde1cbSbellard         }
17271ccde1cbSbellard     }
17281ccde1cbSbellard }
17291ccde1cbSbellard 
17301ccde1cbSbellard /* update the TLB corresponding to virtual page vaddr and phys addr
17311ccde1cbSbellard    addr so that it is no longer dirty */
17326a00d601Sbellard static inline void tlb_set_dirty(CPUState *env,
17336a00d601Sbellard                                  unsigned long addr, target_ulong vaddr)
17341ccde1cbSbellard {
17351ccde1cbSbellard     int i;
17361ccde1cbSbellard 
17371ccde1cbSbellard     addr &= TARGET_PAGE_MASK;
17381ccde1cbSbellard     i = (vaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
173984b7b8e7Sbellard     tlb_set_dirty1(&env->tlb_table[0][i], addr);
174084b7b8e7Sbellard     tlb_set_dirty1(&env->tlb_table[1][i], addr);
17416fa4cea9Sj_mayer #if (NB_MMU_MODES >= 3)
17426fa4cea9Sj_mayer     tlb_set_dirty1(&env->tlb_table[2][i], addr);
17436fa4cea9Sj_mayer #if (NB_MMU_MODES == 4)
17446fa4cea9Sj_mayer     tlb_set_dirty1(&env->tlb_table[3][i], addr);
17456fa4cea9Sj_mayer #endif
17466fa4cea9Sj_mayer #endif
17471ccde1cbSbellard }
17481ccde1cbSbellard 
/* add a new TLB entry. At most one entry for a given virtual address
   is permitted. Return 0 if OK or 2 if the page could not be mapped
   (can only happen in non SOFTMMU mode for I/O pages or pages
   conflicting with the host address space). */
int tlb_set_page_exec(CPUState *env, target_ulong vaddr,
                      target_phys_addr_t paddr, int prot,
                      int mmu_idx, int is_softmmu)
{
    PhysPageDesc *p;
    unsigned long pd;
    unsigned int index;
    target_ulong address;
    target_phys_addr_t addend;
    int ret;
    CPUTLBEntry *te;
    int i;

    /* look up the physical page descriptor; unmapped physical pages
       get the unassigned I/O handler */
    p = phys_page_find(paddr >> TARGET_PAGE_BITS);
    if (!p) {
        pd = IO_MEM_UNASSIGNED;
    } else {
        pd = p->phys_offset;
    }
#if defined(DEBUG_TLB)
    printf("tlb_set_page: vaddr=" TARGET_FMT_lx " paddr=0x%08x prot=%x idx=%d smmu=%d pd=0x%08lx\n",
           vaddr, (int)paddr, prot, mmu_idx, is_softmmu, pd);
#endif

    ret = 0;
#if !defined(CONFIG_SOFTMMU)
    if (is_softmmu)
#endif
    {
        if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM && !(pd & IO_MEM_ROMD)) {
            /* IO memory case */
            address = vaddr | pd;
            addend = paddr;
        } else {
            /* standard memory */
            address = vaddr;
            addend = (unsigned long)phys_ram_base + (pd & TARGET_PAGE_MASK);
        }

        /* Make accesses to pages with watchpoints go via the
           watchpoint trap routines.  */
        for (i = 0; i < env->nb_watchpoints; i++) {
            if (vaddr == (env->watchpoint[i].vaddr & TARGET_PAGE_MASK)) {
                if (address & ~TARGET_PAGE_MASK) {
                    env->watchpoint[i].addend = 0;
                    address = vaddr | io_mem_watch;
                } else {
                    env->watchpoint[i].addend = pd - paddr +
                        (unsigned long) phys_ram_base;
                    /* TODO: Figure out how to make read watchpoints coexist
                       with code.  */
                    pd = (pd & TARGET_PAGE_MASK) | io_mem_watch | IO_MEM_ROMD;
                }
            }
        }

        /* fill the TLB slot for (vaddr, mmu_idx) */
        index = (vaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
        addend -= vaddr;
        te = &env->tlb_table[mmu_idx][index];
        te->addend = addend;
        if (prot & PAGE_READ) {
            te->addr_read = address;
        } else {
            te->addr_read = -1;
        }

        if (prot & PAGE_EXEC) {
            te->addr_code = address;
        } else {
            te->addr_code = -1;
        }
        if (prot & PAGE_WRITE) {
            if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_ROM ||
                (pd & IO_MEM_ROMD)) {
                /* write access calls the I/O callback */
                te->addr_write = vaddr |
                    (pd & ~(TARGET_PAGE_MASK | IO_MEM_ROMD));
            } else if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_RAM &&
                       !cpu_physical_memory_is_dirty(pd)) {
                /* clean RAM page: route writes through the notdirty
                   handler so the dirty bit gets set */
                te->addr_write = vaddr | IO_MEM_NOTDIRTY;
            } else {
                te->addr_write = address;
            }
        } else {
            te->addr_write = -1;
        }
    }
#if !defined(CONFIG_SOFTMMU)
    else {
        /* non-softmmu: map the guest page directly into the host
           address space with mmap() */
        if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM) {
            /* IO access: no mapping is done as it will be handled by the
               soft MMU */
            if (!(env->hflags & HF_SOFTMMU_MASK))
                ret = 2;
        } else {
            void *map_addr;

            if (vaddr >= MMAP_AREA_END) {
                ret = 2;
            } else {
                if (prot & PROT_WRITE) {
                    if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_ROM ||
#if defined(TARGET_HAS_SMC) || 1
                        first_tb ||
#endif
                        ((pd & ~TARGET_PAGE_MASK) == IO_MEM_RAM &&
                         !cpu_physical_memory_is_dirty(pd))) {
                        /* ROM: we do as if code was inside */
                        /* if code is present, we only map as read only and save the
                           original mapping */
                        VirtPageDesc *vp;

                        vp = virt_page_find_alloc(vaddr >> TARGET_PAGE_BITS, 1);
                        vp->phys_addr = pd;
                        vp->prot = prot;
                        vp->valid_tag = virt_valid_tag;
                        prot &= ~PAGE_WRITE;
                    }
                }
                map_addr = mmap((void *)vaddr, TARGET_PAGE_SIZE, prot,
                                MAP_SHARED | MAP_FIXED, phys_ram_fd, (pd & TARGET_PAGE_MASK));
                if (map_addr == MAP_FAILED) {
                    cpu_abort(env, "mmap failed when mapped physical address 0x%08x to virtual address 0x%08x\n",
                              paddr, vaddr);
                }
            }
        }
    }
#endif
    return ret;
}
18849fa3e853Sbellard 
/* called from signal handler: invalidate the code and unprotect the
   page. Return TRUE if the fault was succesfully handled. */
int page_unprotect(target_ulong addr, unsigned long pc, void *puc)
{
#if !defined(CONFIG_SOFTMMU)
    VirtPageDesc *vp;

#if defined(DEBUG_TLB)
    printf("page_unprotect: addr=0x%08x\n", addr);
#endif
    addr &= TARGET_PAGE_MASK;

    /* if it is not mapped, no need to worry here */
    if (addr >= MMAP_AREA_END)
        return 0;
    vp = virt_page_find(addr >> TARGET_PAGE_BITS);
    if (!vp)
        return 0;
    /* NOTE: in this case, validate_tag is _not_ tested as it
       validates only the code TLB */
    if (vp->valid_tag != virt_valid_tag)
        return 0;
    /* pages that were never writable cannot be unprotected here */
    if (!(vp->prot & PAGE_WRITE))
        return 0;
#if defined(DEBUG_TLB)
    printf("page_unprotect: addr=0x%08x phys_addr=0x%08x prot=%x\n",
           addr, vp->phys_addr, vp->prot);
#endif
    /* restore the page's saved protection, re-enabling writes */
    if (mprotect((void *)addr, TARGET_PAGE_SIZE, vp->prot) < 0)
        cpu_abort(cpu_single_env, "error mprotect addr=0x%lx prot=%d\n",
                  (unsigned long)addr, vp->prot);
    /* set the dirty bit */
    phys_ram_dirty[vp->phys_addr >> TARGET_PAGE_BITS] = 0xff;
    /* flush the code inside */
    tb_invalidate_phys_page(vp->phys_addr, pc, puc);
    return 1;
#else
    return 0;
#endif
}
192533417e70Sbellard 
19260124311eSbellard #else
19270124311eSbellard 
1928ee8b7021Sbellard void tlb_flush(CPUState *env, int flush_global)
19290124311eSbellard {
19300124311eSbellard }
19310124311eSbellard 
19322e12669aSbellard void tlb_flush_page(CPUState *env, target_ulong addr)
19330124311eSbellard {
19340124311eSbellard }
19350124311eSbellard 
193684b7b8e7Sbellard int tlb_set_page_exec(CPUState *env, target_ulong vaddr,
19372e12669aSbellard                       target_phys_addr_t paddr, int prot,
19386ebbf390Sj_mayer                       int mmu_idx, int is_softmmu)
193933417e70Sbellard {
19409fa3e853Sbellard     return 0;
194133417e70Sbellard }
194233417e70Sbellard 
19439fa3e853Sbellard /* dump memory mappings */
19449fa3e853Sbellard void page_dump(FILE *f)
194533417e70Sbellard {
19469fa3e853Sbellard     unsigned long start, end;
19479fa3e853Sbellard     int i, j, prot, prot1;
19489fa3e853Sbellard     PageDesc *p;
19499fa3e853Sbellard 
19509fa3e853Sbellard     fprintf(f, "%-8s %-8s %-8s %s\n",
19519fa3e853Sbellard             "start", "end", "size", "prot");
19529fa3e853Sbellard     start = -1;
19539fa3e853Sbellard     end = -1;
19549fa3e853Sbellard     prot = 0;
19559fa3e853Sbellard     for(i = 0; i <= L1_SIZE; i++) {
19569fa3e853Sbellard         if (i < L1_SIZE)
19579fa3e853Sbellard             p = l1_map[i];
19589fa3e853Sbellard         else
19599fa3e853Sbellard             p = NULL;
19609fa3e853Sbellard         for(j = 0;j < L2_SIZE; j++) {
196133417e70Sbellard             if (!p)
19629fa3e853Sbellard                 prot1 = 0;
19639fa3e853Sbellard             else
19649fa3e853Sbellard                 prot1 = p[j].flags;
19659fa3e853Sbellard             if (prot1 != prot) {
19669fa3e853Sbellard                 end = (i << (32 - L1_BITS)) | (j << TARGET_PAGE_BITS);
19679fa3e853Sbellard                 if (start != -1) {
19689fa3e853Sbellard                     fprintf(f, "%08lx-%08lx %08lx %c%c%c\n",
19699fa3e853Sbellard                             start, end, end - start,
19709fa3e853Sbellard                             prot & PAGE_READ ? 'r' : '-',
19719fa3e853Sbellard                             prot & PAGE_WRITE ? 'w' : '-',
19729fa3e853Sbellard                             prot & PAGE_EXEC ? 'x' : '-');
197333417e70Sbellard                 }
19749fa3e853Sbellard                 if (prot1 != 0)
19759fa3e853Sbellard                     start = end;
19769fa3e853Sbellard                 else
19779fa3e853Sbellard                     start = -1;
19789fa3e853Sbellard                 prot = prot1;
19799fa3e853Sbellard             }
19809fa3e853Sbellard             if (!p)
19819fa3e853Sbellard                 break;
19829fa3e853Sbellard         }
19839fa3e853Sbellard     }
19849fa3e853Sbellard }
19859fa3e853Sbellard 
198653a5960aSpbrook int page_get_flags(target_ulong address)
19879fa3e853Sbellard {
19889fa3e853Sbellard     PageDesc *p;
19899fa3e853Sbellard 
19909fa3e853Sbellard     p = page_find(address >> TARGET_PAGE_BITS);
19919fa3e853Sbellard     if (!p)
19929fa3e853Sbellard         return 0;
19939fa3e853Sbellard     return p->flags;
19949fa3e853Sbellard }
19959fa3e853Sbellard 
19969fa3e853Sbellard /* modify the flags of a page and invalidate the code if
19979fa3e853Sbellard    necessary. The flag PAGE_WRITE_ORG is positionned automatically
19989fa3e853Sbellard    depending on PAGE_WRITE */
199953a5960aSpbrook void page_set_flags(target_ulong start, target_ulong end, int flags)
20009fa3e853Sbellard {
20019fa3e853Sbellard     PageDesc *p;
200253a5960aSpbrook     target_ulong addr;
20039fa3e853Sbellard 
20049fa3e853Sbellard     start = start & TARGET_PAGE_MASK;
20059fa3e853Sbellard     end = TARGET_PAGE_ALIGN(end);
20069fa3e853Sbellard     if (flags & PAGE_WRITE)
20079fa3e853Sbellard         flags |= PAGE_WRITE_ORG;
20089fa3e853Sbellard     spin_lock(&tb_lock);
20099fa3e853Sbellard     for(addr = start; addr < end; addr += TARGET_PAGE_SIZE) {
20109fa3e853Sbellard         p = page_find_alloc(addr >> TARGET_PAGE_BITS);
20119fa3e853Sbellard         /* if the write protection is set, then we invalidate the code
20129fa3e853Sbellard            inside */
20139fa3e853Sbellard         if (!(p->flags & PAGE_WRITE) &&
20149fa3e853Sbellard             (flags & PAGE_WRITE) &&
20159fa3e853Sbellard             p->first_tb) {
2016d720b93dSbellard             tb_invalidate_phys_page(addr, 0, NULL);
20179fa3e853Sbellard         }
20189fa3e853Sbellard         p->flags = flags;
20199fa3e853Sbellard     }
20209fa3e853Sbellard     spin_unlock(&tb_lock);
20219fa3e853Sbellard }
20229fa3e853Sbellard 
20233d97b40bSths int page_check_range(target_ulong start, target_ulong len, int flags)
20243d97b40bSths {
20253d97b40bSths     PageDesc *p;
20263d97b40bSths     target_ulong end;
20273d97b40bSths     target_ulong addr;
20283d97b40bSths 
20293d97b40bSths     end = TARGET_PAGE_ALIGN(start+len); /* must do before we loose bits in the next step */
20303d97b40bSths     start = start & TARGET_PAGE_MASK;
20313d97b40bSths 
20323d97b40bSths     if( end < start )
20333d97b40bSths         /* we've wrapped around */
20343d97b40bSths         return -1;
20353d97b40bSths     for(addr = start; addr < end; addr += TARGET_PAGE_SIZE) {
20363d97b40bSths         p = page_find(addr >> TARGET_PAGE_BITS);
20373d97b40bSths         if( !p )
20383d97b40bSths             return -1;
20393d97b40bSths         if( !(p->flags & PAGE_VALID) )
20403d97b40bSths             return -1;
20413d97b40bSths 
2042dae3270cSbellard         if ((flags & PAGE_READ) && !(p->flags & PAGE_READ))
20433d97b40bSths             return -1;
2044dae3270cSbellard         if (flags & PAGE_WRITE) {
2045dae3270cSbellard             if (!(p->flags & PAGE_WRITE_ORG))
20463d97b40bSths                 return -1;
2047dae3270cSbellard             /* unprotect the page if it was put read-only because it
2048dae3270cSbellard                contains translated code */
2049dae3270cSbellard             if (!(p->flags & PAGE_WRITE)) {
2050dae3270cSbellard                 if (!page_unprotect(addr, 0, NULL))
2051dae3270cSbellard                     return -1;
2052dae3270cSbellard             }
2053dae3270cSbellard             return 0;
2054dae3270cSbellard         }
20553d97b40bSths     }
20563d97b40bSths     return 0;
20573d97b40bSths }
20583d97b40bSths 
20599fa3e853Sbellard /* called from signal handler: invalidate the code and unprotect the
20609fa3e853Sbellard    page. Return TRUE if the fault was succesfully handled. */
206153a5960aSpbrook int page_unprotect(target_ulong address, unsigned long pc, void *puc)
20629fa3e853Sbellard {
20639fa3e853Sbellard     unsigned int page_index, prot, pindex;
20649fa3e853Sbellard     PageDesc *p, *p1;
206553a5960aSpbrook     target_ulong host_start, host_end, addr;
20669fa3e853Sbellard 
206783fb7adfSbellard     host_start = address & qemu_host_page_mask;
20689fa3e853Sbellard     page_index = host_start >> TARGET_PAGE_BITS;
20699fa3e853Sbellard     p1 = page_find(page_index);
20709fa3e853Sbellard     if (!p1)
20719fa3e853Sbellard         return 0;
207283fb7adfSbellard     host_end = host_start + qemu_host_page_size;
20739fa3e853Sbellard     p = p1;
20749fa3e853Sbellard     prot = 0;
20759fa3e853Sbellard     for(addr = host_start;addr < host_end; addr += TARGET_PAGE_SIZE) {
20769fa3e853Sbellard         prot |= p->flags;
20779fa3e853Sbellard         p++;
20789fa3e853Sbellard     }
20799fa3e853Sbellard     /* if the page was really writable, then we change its
20809fa3e853Sbellard        protection back to writable */
20819fa3e853Sbellard     if (prot & PAGE_WRITE_ORG) {
20829fa3e853Sbellard         pindex = (address - host_start) >> TARGET_PAGE_BITS;
20839fa3e853Sbellard         if (!(p1[pindex].flags & PAGE_WRITE)) {
208453a5960aSpbrook             mprotect((void *)g2h(host_start), qemu_host_page_size,
20859fa3e853Sbellard                      (prot & PAGE_BITS) | PAGE_WRITE);
20869fa3e853Sbellard             p1[pindex].flags |= PAGE_WRITE;
20879fa3e853Sbellard             /* and since the content will be modified, we must invalidate
20889fa3e853Sbellard                the corresponding translated code. */
2089d720b93dSbellard             tb_invalidate_phys_page(address, pc, puc);
20909fa3e853Sbellard #ifdef DEBUG_TB_CHECK
20919fa3e853Sbellard             tb_invalidate_check(address);
20929fa3e853Sbellard #endif
20939fa3e853Sbellard             return 1;
20949fa3e853Sbellard         }
20959fa3e853Sbellard     }
20969fa3e853Sbellard     return 0;
20979fa3e853Sbellard }
20989fa3e853Sbellard 
20996a00d601Sbellard static inline void tlb_set_dirty(CPUState *env,
21006a00d601Sbellard                                  unsigned long addr, target_ulong vaddr)
21011ccde1cbSbellard {
21021ccde1cbSbellard }
21039fa3e853Sbellard #endif /* defined(CONFIG_USER_ONLY) */
210433417e70Sbellard 
2105db7b5426Sblueswir1 static int subpage_register (subpage_t *mmio, uint32_t start, uint32_t end,
210600f82b8aSaurel32                              ram_addr_t memory);
210700f82b8aSaurel32 static void *subpage_init (target_phys_addr_t base, ram_addr_t *phys,
210800f82b8aSaurel32                            ram_addr_t orig_memory);
2109db7b5426Sblueswir1 #define CHECK_SUBPAGE(addr, start_addr, start_addr2, end_addr, end_addr2, \
2110db7b5426Sblueswir1                       need_subpage)                                     \
2111db7b5426Sblueswir1     do {                                                                \
2112db7b5426Sblueswir1         if (addr > start_addr)                                          \
2113db7b5426Sblueswir1             start_addr2 = 0;                                            \
2114db7b5426Sblueswir1         else {                                                          \
2115db7b5426Sblueswir1             start_addr2 = start_addr & ~TARGET_PAGE_MASK;               \
2116db7b5426Sblueswir1             if (start_addr2 > 0)                                        \
2117db7b5426Sblueswir1                 need_subpage = 1;                                       \
2118db7b5426Sblueswir1         }                                                               \
2119db7b5426Sblueswir1                                                                         \
212049e9fba2Sblueswir1         if ((start_addr + orig_size) - addr >= TARGET_PAGE_SIZE)        \
2121db7b5426Sblueswir1             end_addr2 = TARGET_PAGE_SIZE - 1;                           \
2122db7b5426Sblueswir1         else {                                                          \
2123db7b5426Sblueswir1             end_addr2 = (start_addr + orig_size - 1) & ~TARGET_PAGE_MASK; \
2124db7b5426Sblueswir1             if (end_addr2 < TARGET_PAGE_SIZE - 1)                       \
2125db7b5426Sblueswir1                 need_subpage = 1;                                       \
2126db7b5426Sblueswir1         }                                                               \
2127db7b5426Sblueswir1     } while (0)
2128db7b5426Sblueswir1 
212933417e70Sbellard /* register physical memory. 'size' must be a multiple of the target
213033417e70Sbellard    page size. If (phys_offset & ~TARGET_PAGE_MASK) != 0, then it is an
213133417e70Sbellard    io memory page */
21322e12669aSbellard void cpu_register_physical_memory(target_phys_addr_t start_addr,
213300f82b8aSaurel32                                   ram_addr_t size,
213400f82b8aSaurel32                                   ram_addr_t phys_offset)
213533417e70Sbellard {
2136108c49b8Sbellard     target_phys_addr_t addr, end_addr;
213792e873b9Sbellard     PhysPageDesc *p;
21389d42037bSbellard     CPUState *env;
213900f82b8aSaurel32     ram_addr_t orig_size = size;
2140db7b5426Sblueswir1     void *subpage;
214133417e70Sbellard 
21425fd386f6Sbellard     size = (size + TARGET_PAGE_SIZE - 1) & TARGET_PAGE_MASK;
214349e9fba2Sblueswir1     end_addr = start_addr + (target_phys_addr_t)size;
214449e9fba2Sblueswir1     for(addr = start_addr; addr != end_addr; addr += TARGET_PAGE_SIZE) {
2145db7b5426Sblueswir1         p = phys_page_find(addr >> TARGET_PAGE_BITS);
2146db7b5426Sblueswir1         if (p && p->phys_offset != IO_MEM_UNASSIGNED) {
214700f82b8aSaurel32             ram_addr_t orig_memory = p->phys_offset;
2148db7b5426Sblueswir1             target_phys_addr_t start_addr2, end_addr2;
2149db7b5426Sblueswir1             int need_subpage = 0;
2150db7b5426Sblueswir1 
2151db7b5426Sblueswir1             CHECK_SUBPAGE(addr, start_addr, start_addr2, end_addr, end_addr2,
2152db7b5426Sblueswir1                           need_subpage);
21534254fab8Sblueswir1             if (need_subpage || phys_offset & IO_MEM_SUBWIDTH) {
2154db7b5426Sblueswir1                 if (!(orig_memory & IO_MEM_SUBPAGE)) {
2155db7b5426Sblueswir1                     subpage = subpage_init((addr & TARGET_PAGE_MASK),
2156db7b5426Sblueswir1                                            &p->phys_offset, orig_memory);
2157db7b5426Sblueswir1                 } else {
2158db7b5426Sblueswir1                     subpage = io_mem_opaque[(orig_memory & ~TARGET_PAGE_MASK)
2159db7b5426Sblueswir1                                             >> IO_MEM_SHIFT];
2160db7b5426Sblueswir1                 }
2161db7b5426Sblueswir1                 subpage_register(subpage, start_addr2, end_addr2, phys_offset);
2162db7b5426Sblueswir1             } else {
2163db7b5426Sblueswir1                 p->phys_offset = phys_offset;
2164db7b5426Sblueswir1                 if ((phys_offset & ~TARGET_PAGE_MASK) <= IO_MEM_ROM ||
2165db7b5426Sblueswir1                     (phys_offset & IO_MEM_ROMD))
2166db7b5426Sblueswir1                     phys_offset += TARGET_PAGE_SIZE;
2167db7b5426Sblueswir1             }
2168db7b5426Sblueswir1         } else {
2169108c49b8Sbellard             p = phys_page_find_alloc(addr >> TARGET_PAGE_BITS, 1);
21709fa3e853Sbellard             p->phys_offset = phys_offset;
21712a4188a3Sbellard             if ((phys_offset & ~TARGET_PAGE_MASK) <= IO_MEM_ROM ||
21722a4188a3Sbellard                 (phys_offset & IO_MEM_ROMD))
217333417e70Sbellard                 phys_offset += TARGET_PAGE_SIZE;
2174db7b5426Sblueswir1             else {
2175db7b5426Sblueswir1                 target_phys_addr_t start_addr2, end_addr2;
2176db7b5426Sblueswir1                 int need_subpage = 0;
2177db7b5426Sblueswir1 
2178db7b5426Sblueswir1                 CHECK_SUBPAGE(addr, start_addr, start_addr2, end_addr,
2179db7b5426Sblueswir1                               end_addr2, need_subpage);
2180db7b5426Sblueswir1 
21814254fab8Sblueswir1                 if (need_subpage || phys_offset & IO_MEM_SUBWIDTH) {
2182db7b5426Sblueswir1                     subpage = subpage_init((addr & TARGET_PAGE_MASK),
2183db7b5426Sblueswir1                                            &p->phys_offset, IO_MEM_UNASSIGNED);
2184db7b5426Sblueswir1                     subpage_register(subpage, start_addr2, end_addr2,
2185db7b5426Sblueswir1                                      phys_offset);
2186db7b5426Sblueswir1                 }
2187db7b5426Sblueswir1             }
2188db7b5426Sblueswir1         }
218933417e70Sbellard     }
21909d42037bSbellard 
21919d42037bSbellard     /* since each CPU stores ram addresses in its TLB cache, we must
21929d42037bSbellard        reset the modified entries */
21939d42037bSbellard     /* XXX: slow ! */
21949d42037bSbellard     for(env = first_cpu; env != NULL; env = env->next_cpu) {
21959d42037bSbellard         tlb_flush(env, 1);
21969d42037bSbellard     }
219733417e70Sbellard }
219833417e70Sbellard 
2199ba863458Sbellard /* XXX: temporary until new memory mapping API */
220000f82b8aSaurel32 ram_addr_t cpu_get_physical_page_desc(target_phys_addr_t addr)
2201ba863458Sbellard {
2202ba863458Sbellard     PhysPageDesc *p;
2203ba863458Sbellard 
2204ba863458Sbellard     p = phys_page_find(addr >> TARGET_PAGE_BITS);
2205ba863458Sbellard     if (!p)
2206ba863458Sbellard         return IO_MEM_UNASSIGNED;
2207ba863458Sbellard     return p->phys_offset;
2208ba863458Sbellard }
2209ba863458Sbellard 
2210e9a1ab19Sbellard /* XXX: better than nothing */
221100f82b8aSaurel32 ram_addr_t qemu_ram_alloc(ram_addr_t size)
2212e9a1ab19Sbellard {
2213e9a1ab19Sbellard     ram_addr_t addr;
22147fb4fdcfSbalrog     if ((phys_ram_alloc_offset + size) > phys_ram_size) {
2215ed441467Sbellard         fprintf(stderr, "Not enough memory (requested_size = %" PRIu64 ", max memory = %" PRIu64 "\n",
2216ed441467Sbellard                 (uint64_t)size, (uint64_t)phys_ram_size);
2217e9a1ab19Sbellard         abort();
2218e9a1ab19Sbellard     }
2219e9a1ab19Sbellard     addr = phys_ram_alloc_offset;
2220e9a1ab19Sbellard     phys_ram_alloc_offset = TARGET_PAGE_ALIGN(phys_ram_alloc_offset + size);
2221e9a1ab19Sbellard     return addr;
2222e9a1ab19Sbellard }
2223e9a1ab19Sbellard 
2224e9a1ab19Sbellard void qemu_ram_free(ram_addr_t addr)
2225e9a1ab19Sbellard {
2226e9a1ab19Sbellard }
2227e9a1ab19Sbellard 
2228a4193c8aSbellard static uint32_t unassigned_mem_readb(void *opaque, target_phys_addr_t addr)
222933417e70Sbellard {
223067d3b957Spbrook #ifdef DEBUG_UNASSIGNED
2231ab3d1727Sblueswir1     printf("Unassigned mem read " TARGET_FMT_plx "\n", addr);
223267d3b957Spbrook #endif
2233b4f0a316Sblueswir1 #ifdef TARGET_SPARC
22346c36d3faSblueswir1     do_unassigned_access(addr, 0, 0, 0);
2235f1ccf904Sths #elif TARGET_CRIS
2236f1ccf904Sths     do_unassigned_access(addr, 0, 0, 0);
2237b4f0a316Sblueswir1 #endif
223833417e70Sbellard     return 0;
223933417e70Sbellard }
224033417e70Sbellard 
2241a4193c8aSbellard static void unassigned_mem_writeb(void *opaque, target_phys_addr_t addr, uint32_t val)
224233417e70Sbellard {
224367d3b957Spbrook #ifdef DEBUG_UNASSIGNED
2244ab3d1727Sblueswir1     printf("Unassigned mem write " TARGET_FMT_plx " = 0x%x\n", addr, val);
224567d3b957Spbrook #endif
2246b4f0a316Sblueswir1 #ifdef TARGET_SPARC
22476c36d3faSblueswir1     do_unassigned_access(addr, 1, 0, 0);
2248f1ccf904Sths #elif TARGET_CRIS
2249f1ccf904Sths     do_unassigned_access(addr, 1, 0, 0);
2250b4f0a316Sblueswir1 #endif
225133417e70Sbellard }
225233417e70Sbellard 
225333417e70Sbellard static CPUReadMemoryFunc *unassigned_mem_read[3] = {
225433417e70Sbellard     unassigned_mem_readb,
225533417e70Sbellard     unassigned_mem_readb,
225633417e70Sbellard     unassigned_mem_readb,
225733417e70Sbellard };
225833417e70Sbellard 
225933417e70Sbellard static CPUWriteMemoryFunc *unassigned_mem_write[3] = {
226033417e70Sbellard     unassigned_mem_writeb,
226133417e70Sbellard     unassigned_mem_writeb,
226233417e70Sbellard     unassigned_mem_writeb,
226333417e70Sbellard };
226433417e70Sbellard 
2265a4193c8aSbellard static void notdirty_mem_writeb(void *opaque, target_phys_addr_t addr, uint32_t val)
22661ccde1cbSbellard {
22673a7d929eSbellard     unsigned long ram_addr;
22683a7d929eSbellard     int dirty_flags;
22693a7d929eSbellard     ram_addr = addr - (unsigned long)phys_ram_base;
22703a7d929eSbellard     dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
22713a7d929eSbellard     if (!(dirty_flags & CODE_DIRTY_FLAG)) {
22723a7d929eSbellard #if !defined(CONFIG_USER_ONLY)
22733a7d929eSbellard         tb_invalidate_phys_page_fast(ram_addr, 1);
22743a7d929eSbellard         dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
22753a7d929eSbellard #endif
22763a7d929eSbellard     }
2277c27004ecSbellard     stb_p((uint8_t *)(long)addr, val);
2278f32fc648Sbellard #ifdef USE_KQEMU
2279f32fc648Sbellard     if (cpu_single_env->kqemu_enabled &&
2280f32fc648Sbellard         (dirty_flags & KQEMU_MODIFY_PAGE_MASK) != KQEMU_MODIFY_PAGE_MASK)
2281f32fc648Sbellard         kqemu_modify_page(cpu_single_env, ram_addr);
2282f32fc648Sbellard #endif
2283f23db169Sbellard     dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
2284f23db169Sbellard     phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] = dirty_flags;
2285f23db169Sbellard     /* we remove the notdirty callback only if the code has been
2286f23db169Sbellard        flushed */
2287f23db169Sbellard     if (dirty_flags == 0xff)
22886a00d601Sbellard         tlb_set_dirty(cpu_single_env, addr, cpu_single_env->mem_write_vaddr);
22891ccde1cbSbellard }
22901ccde1cbSbellard 
2291a4193c8aSbellard static void notdirty_mem_writew(void *opaque, target_phys_addr_t addr, uint32_t val)
22921ccde1cbSbellard {
22933a7d929eSbellard     unsigned long ram_addr;
22943a7d929eSbellard     int dirty_flags;
22953a7d929eSbellard     ram_addr = addr - (unsigned long)phys_ram_base;
22963a7d929eSbellard     dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
22973a7d929eSbellard     if (!(dirty_flags & CODE_DIRTY_FLAG)) {
22983a7d929eSbellard #if !defined(CONFIG_USER_ONLY)
22993a7d929eSbellard         tb_invalidate_phys_page_fast(ram_addr, 2);
23003a7d929eSbellard         dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
23013a7d929eSbellard #endif
23023a7d929eSbellard     }
2303c27004ecSbellard     stw_p((uint8_t *)(long)addr, val);
2304f32fc648Sbellard #ifdef USE_KQEMU
2305f32fc648Sbellard     if (cpu_single_env->kqemu_enabled &&
2306f32fc648Sbellard         (dirty_flags & KQEMU_MODIFY_PAGE_MASK) != KQEMU_MODIFY_PAGE_MASK)
2307f32fc648Sbellard         kqemu_modify_page(cpu_single_env, ram_addr);
2308f32fc648Sbellard #endif
2309f23db169Sbellard     dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
2310f23db169Sbellard     phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] = dirty_flags;
2311f23db169Sbellard     /* we remove the notdirty callback only if the code has been
2312f23db169Sbellard        flushed */
2313f23db169Sbellard     if (dirty_flags == 0xff)
23146a00d601Sbellard         tlb_set_dirty(cpu_single_env, addr, cpu_single_env->mem_write_vaddr);
23151ccde1cbSbellard }
23161ccde1cbSbellard 
2317a4193c8aSbellard static void notdirty_mem_writel(void *opaque, target_phys_addr_t addr, uint32_t val)
23181ccde1cbSbellard {
23193a7d929eSbellard     unsigned long ram_addr;
23203a7d929eSbellard     int dirty_flags;
23213a7d929eSbellard     ram_addr = addr - (unsigned long)phys_ram_base;
23223a7d929eSbellard     dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
23233a7d929eSbellard     if (!(dirty_flags & CODE_DIRTY_FLAG)) {
23243a7d929eSbellard #if !defined(CONFIG_USER_ONLY)
23253a7d929eSbellard         tb_invalidate_phys_page_fast(ram_addr, 4);
23263a7d929eSbellard         dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
23273a7d929eSbellard #endif
23283a7d929eSbellard     }
2329c27004ecSbellard     stl_p((uint8_t *)(long)addr, val);
2330f32fc648Sbellard #ifdef USE_KQEMU
2331f32fc648Sbellard     if (cpu_single_env->kqemu_enabled &&
2332f32fc648Sbellard         (dirty_flags & KQEMU_MODIFY_PAGE_MASK) != KQEMU_MODIFY_PAGE_MASK)
2333f32fc648Sbellard         kqemu_modify_page(cpu_single_env, ram_addr);
2334f32fc648Sbellard #endif
2335f23db169Sbellard     dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
2336f23db169Sbellard     phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] = dirty_flags;
2337f23db169Sbellard     /* we remove the notdirty callback only if the code has been
2338f23db169Sbellard        flushed */
2339f23db169Sbellard     if (dirty_flags == 0xff)
23406a00d601Sbellard         tlb_set_dirty(cpu_single_env, addr, cpu_single_env->mem_write_vaddr);
23411ccde1cbSbellard }
23421ccde1cbSbellard 
23433a7d929eSbellard static CPUReadMemoryFunc *error_mem_read[3] = {
23443a7d929eSbellard     NULL, /* never used */
23453a7d929eSbellard     NULL, /* never used */
23463a7d929eSbellard     NULL, /* never used */
23473a7d929eSbellard };
23483a7d929eSbellard 
23491ccde1cbSbellard static CPUWriteMemoryFunc *notdirty_mem_write[3] = {
23501ccde1cbSbellard     notdirty_mem_writeb,
23511ccde1cbSbellard     notdirty_mem_writew,
23521ccde1cbSbellard     notdirty_mem_writel,
23531ccde1cbSbellard };
23541ccde1cbSbellard 
23556658ffb8Spbrook #if defined(CONFIG_SOFTMMU)
23566658ffb8Spbrook /* Watchpoint access routines.  Watchpoints are inserted using TLB tricks,
23576658ffb8Spbrook    so these check for a hit then pass through to the normal out-of-line
23586658ffb8Spbrook    phys routines.  */
23596658ffb8Spbrook static uint32_t watch_mem_readb(void *opaque, target_phys_addr_t addr)
23606658ffb8Spbrook {
23616658ffb8Spbrook     return ldub_phys(addr);
23626658ffb8Spbrook }
23636658ffb8Spbrook 
23646658ffb8Spbrook static uint32_t watch_mem_readw(void *opaque, target_phys_addr_t addr)
23656658ffb8Spbrook {
23666658ffb8Spbrook     return lduw_phys(addr);
23676658ffb8Spbrook }
23686658ffb8Spbrook 
23696658ffb8Spbrook static uint32_t watch_mem_readl(void *opaque, target_phys_addr_t addr)
23706658ffb8Spbrook {
23716658ffb8Spbrook     return ldl_phys(addr);
23726658ffb8Spbrook }
23736658ffb8Spbrook 
23746658ffb8Spbrook /* Generate a debug exception if a watchpoint has been hit.
23756658ffb8Spbrook    Returns the real physical address of the access.  addr will be a host
2376d79acba4Sbalrog    address in case of a RAM location.  */
23776658ffb8Spbrook static target_ulong check_watchpoint(target_phys_addr_t addr)
23786658ffb8Spbrook {
23796658ffb8Spbrook     CPUState *env = cpu_single_env;
23806658ffb8Spbrook     target_ulong watch;
23816658ffb8Spbrook     target_ulong retaddr;
23826658ffb8Spbrook     int i;
23836658ffb8Spbrook 
23846658ffb8Spbrook     retaddr = addr;
23856658ffb8Spbrook     for (i = 0; i < env->nb_watchpoints; i++) {
23866658ffb8Spbrook         watch = env->watchpoint[i].vaddr;
23876658ffb8Spbrook         if (((env->mem_write_vaddr ^ watch) & TARGET_PAGE_MASK) == 0) {
2388d79acba4Sbalrog             retaddr = addr - env->watchpoint[i].addend;
23896658ffb8Spbrook             if (((addr ^ watch) & ~TARGET_PAGE_MASK) == 0) {
23906658ffb8Spbrook                 cpu_single_env->watchpoint_hit = i + 1;
23916658ffb8Spbrook                 cpu_interrupt(cpu_single_env, CPU_INTERRUPT_DEBUG);
23926658ffb8Spbrook                 break;
23936658ffb8Spbrook             }
23946658ffb8Spbrook         }
23956658ffb8Spbrook     }
23966658ffb8Spbrook     return retaddr;
23976658ffb8Spbrook }
23986658ffb8Spbrook 
23996658ffb8Spbrook static void watch_mem_writeb(void *opaque, target_phys_addr_t addr,
24006658ffb8Spbrook                              uint32_t val)
24016658ffb8Spbrook {
24026658ffb8Spbrook     addr = check_watchpoint(addr);
24036658ffb8Spbrook     stb_phys(addr, val);
24046658ffb8Spbrook }
24056658ffb8Spbrook 
24066658ffb8Spbrook static void watch_mem_writew(void *opaque, target_phys_addr_t addr,
24076658ffb8Spbrook                              uint32_t val)
24086658ffb8Spbrook {
24096658ffb8Spbrook     addr = check_watchpoint(addr);
24106658ffb8Spbrook     stw_phys(addr, val);
24116658ffb8Spbrook }
24126658ffb8Spbrook 
24136658ffb8Spbrook static void watch_mem_writel(void *opaque, target_phys_addr_t addr,
24146658ffb8Spbrook                              uint32_t val)
24156658ffb8Spbrook {
24166658ffb8Spbrook     addr = check_watchpoint(addr);
24176658ffb8Spbrook     stl_phys(addr, val);
24186658ffb8Spbrook }
24196658ffb8Spbrook 
24206658ffb8Spbrook static CPUReadMemoryFunc *watch_mem_read[3] = {
24216658ffb8Spbrook     watch_mem_readb,
24226658ffb8Spbrook     watch_mem_readw,
24236658ffb8Spbrook     watch_mem_readl,
24246658ffb8Spbrook };
24256658ffb8Spbrook 
24266658ffb8Spbrook static CPUWriteMemoryFunc *watch_mem_write[3] = {
24276658ffb8Spbrook     watch_mem_writeb,
24286658ffb8Spbrook     watch_mem_writew,
24296658ffb8Spbrook     watch_mem_writel,
24306658ffb8Spbrook };
24316658ffb8Spbrook #endif
24326658ffb8Spbrook 
2433db7b5426Sblueswir1 static inline uint32_t subpage_readlen (subpage_t *mmio, target_phys_addr_t addr,
2434db7b5426Sblueswir1                                  unsigned int len)
2435db7b5426Sblueswir1 {
2436db7b5426Sblueswir1     uint32_t ret;
2437db7b5426Sblueswir1     unsigned int idx;
2438db7b5426Sblueswir1 
2439db7b5426Sblueswir1     idx = SUBPAGE_IDX(addr - mmio->base);
2440db7b5426Sblueswir1 #if defined(DEBUG_SUBPAGE)
2441db7b5426Sblueswir1     printf("%s: subpage %p len %d addr " TARGET_FMT_plx " idx %d\n", __func__,
2442db7b5426Sblueswir1            mmio, len, addr, idx);
2443db7b5426Sblueswir1 #endif
24443ee89922Sblueswir1     ret = (**mmio->mem_read[idx][len])(mmio->opaque[idx][0][len], addr);
2445db7b5426Sblueswir1 
2446db7b5426Sblueswir1     return ret;
2447db7b5426Sblueswir1 }
2448db7b5426Sblueswir1 
2449db7b5426Sblueswir1 static inline void subpage_writelen (subpage_t *mmio, target_phys_addr_t addr,
2450db7b5426Sblueswir1                               uint32_t value, unsigned int len)
2451db7b5426Sblueswir1 {
2452db7b5426Sblueswir1     unsigned int idx;
2453db7b5426Sblueswir1 
2454db7b5426Sblueswir1     idx = SUBPAGE_IDX(addr - mmio->base);
2455db7b5426Sblueswir1 #if defined(DEBUG_SUBPAGE)
2456db7b5426Sblueswir1     printf("%s: subpage %p len %d addr " TARGET_FMT_plx " idx %d value %08x\n", __func__,
2457db7b5426Sblueswir1            mmio, len, addr, idx, value);
2458db7b5426Sblueswir1 #endif
24593ee89922Sblueswir1     (**mmio->mem_write[idx][len])(mmio->opaque[idx][1][len], addr, value);
2460db7b5426Sblueswir1 }
2461db7b5426Sblueswir1 
2462db7b5426Sblueswir1 static uint32_t subpage_readb (void *opaque, target_phys_addr_t addr)
2463db7b5426Sblueswir1 {
2464db7b5426Sblueswir1 #if defined(DEBUG_SUBPAGE)
2465db7b5426Sblueswir1     printf("%s: addr " TARGET_FMT_plx "\n", __func__, addr);
2466db7b5426Sblueswir1 #endif
2467db7b5426Sblueswir1 
2468db7b5426Sblueswir1     return subpage_readlen(opaque, addr, 0);
2469db7b5426Sblueswir1 }
2470db7b5426Sblueswir1 
2471db7b5426Sblueswir1 static void subpage_writeb (void *opaque, target_phys_addr_t addr,
2472db7b5426Sblueswir1                             uint32_t value)
2473db7b5426Sblueswir1 {
2474db7b5426Sblueswir1 #if defined(DEBUG_SUBPAGE)
2475db7b5426Sblueswir1     printf("%s: addr " TARGET_FMT_plx " val %08x\n", __func__, addr, value);
2476db7b5426Sblueswir1 #endif
2477db7b5426Sblueswir1     subpage_writelen(opaque, addr, value, 0);
2478db7b5426Sblueswir1 }
2479db7b5426Sblueswir1 
2480db7b5426Sblueswir1 static uint32_t subpage_readw (void *opaque, target_phys_addr_t addr)
2481db7b5426Sblueswir1 {
2482db7b5426Sblueswir1 #if defined(DEBUG_SUBPAGE)
2483db7b5426Sblueswir1     printf("%s: addr " TARGET_FMT_plx "\n", __func__, addr);
2484db7b5426Sblueswir1 #endif
2485db7b5426Sblueswir1 
2486db7b5426Sblueswir1     return subpage_readlen(opaque, addr, 1);
2487db7b5426Sblueswir1 }
2488db7b5426Sblueswir1 
2489db7b5426Sblueswir1 static void subpage_writew (void *opaque, target_phys_addr_t addr,
2490db7b5426Sblueswir1                             uint32_t value)
2491db7b5426Sblueswir1 {
2492db7b5426Sblueswir1 #if defined(DEBUG_SUBPAGE)
2493db7b5426Sblueswir1     printf("%s: addr " TARGET_FMT_plx " val %08x\n", __func__, addr, value);
2494db7b5426Sblueswir1 #endif
2495db7b5426Sblueswir1     subpage_writelen(opaque, addr, value, 1);
2496db7b5426Sblueswir1 }
2497db7b5426Sblueswir1 
2498db7b5426Sblueswir1 static uint32_t subpage_readl (void *opaque, target_phys_addr_t addr)
2499db7b5426Sblueswir1 {
2500db7b5426Sblueswir1 #if defined(DEBUG_SUBPAGE)
2501db7b5426Sblueswir1     printf("%s: addr " TARGET_FMT_plx "\n", __func__, addr);
2502db7b5426Sblueswir1 #endif
2503db7b5426Sblueswir1 
2504db7b5426Sblueswir1     return subpage_readlen(opaque, addr, 2);
2505db7b5426Sblueswir1 }
2506db7b5426Sblueswir1 
2507db7b5426Sblueswir1 static void subpage_writel (void *opaque,
2508db7b5426Sblueswir1                          target_phys_addr_t addr, uint32_t value)
2509db7b5426Sblueswir1 {
2510db7b5426Sblueswir1 #if defined(DEBUG_SUBPAGE)
2511db7b5426Sblueswir1     printf("%s: addr " TARGET_FMT_plx " val %08x\n", __func__, addr, value);
2512db7b5426Sblueswir1 #endif
2513db7b5426Sblueswir1     subpage_writelen(opaque, addr, value, 2);
2514db7b5426Sblueswir1 }
2515db7b5426Sblueswir1 
2516db7b5426Sblueswir1 static CPUReadMemoryFunc *subpage_read[] = {
2517db7b5426Sblueswir1     &subpage_readb,
2518db7b5426Sblueswir1     &subpage_readw,
2519db7b5426Sblueswir1     &subpage_readl,
2520db7b5426Sblueswir1 };
2521db7b5426Sblueswir1 
2522db7b5426Sblueswir1 static CPUWriteMemoryFunc *subpage_write[] = {
2523db7b5426Sblueswir1     &subpage_writeb,
2524db7b5426Sblueswir1     &subpage_writew,
2525db7b5426Sblueswir1     &subpage_writel,
2526db7b5426Sblueswir1 };
2527db7b5426Sblueswir1 
/* Route the byte range [start, end] (offsets within one target page)
   of the subpage container 'mmio' to the I/O handlers registered for
   the token 'memory'.  Returns 0 on success, -1 if either bound lies
   outside the page. */
static int subpage_register (subpage_t *mmio, uint32_t start, uint32_t end,
                             ram_addr_t memory)
{
    int idx, eidx;
    unsigned int i;

    if (start >= TARGET_PAGE_SIZE || end >= TARGET_PAGE_SIZE)
        return -1;
    idx = SUBPAGE_IDX(start);
    eidx = SUBPAGE_IDX(end);
#if defined(DEBUG_SUBPAGE)
    /* NOTE(review): 'memory' is a ram_addr_t printed with %d -- may be
       truncated/mismatched on 64 bit hosts; confirm before relying on
       this debug output. */
    printf("%s: %p start %08x end %08x idx %08x eidx %08x mem %d\n", __func__,
           mmio, start, end, idx, eidx, memory);
#endif
    /* convert the I/O token into an io_mem_* table index */
    memory >>= IO_MEM_SHIFT;
    for (; idx <= eidx; idx++) {
        /* one handler slot per access width code */
        for (i = 0; i < 4; i++) {
            if (io_mem_read[memory][i]) {
                /* store the ADDRESS of the global table slot, so later
                   re-registrations of this token are picked up */
                mmio->mem_read[idx][i] = &io_mem_read[memory][i];
                mmio->opaque[idx][0][i] = io_mem_opaque[memory];
            }
            if (io_mem_write[memory][i]) {
                mmio->mem_write[idx][i] = &io_mem_write[memory][i];
                mmio->opaque[idx][1][i] = io_mem_opaque[memory];
            }
        }
    }

    return 0;
}
2558db7b5426Sblueswir1 
/* Allocate and register a subpage container for the page at 'base'.
   On success *phys receives the new I/O token tagged with
   IO_MEM_SUBPAGE and the whole page is initially routed to
   'orig_memory'.  Returns the container, or NULL if allocation
   failed (in which case *phys is left untouched). */
static void *subpage_init (target_phys_addr_t base, ram_addr_t *phys,
                           ram_addr_t orig_memory)
{
    subpage_t *mmio;
    int subpage_memory;

    mmio = qemu_mallocz(sizeof(subpage_t));
    if (mmio != NULL) {
        mmio->base = base;
        /* allocate a fresh I/O slot whose handlers dispatch per subpage */
        subpage_memory = cpu_register_io_memory(0, subpage_read, subpage_write, mmio);
#if defined(DEBUG_SUBPAGE)
        printf("%s: %p base " TARGET_FMT_plx " len %08x %d\n", __func__,
               mmio, base, TARGET_PAGE_SIZE, subpage_memory);
#endif
        *phys = subpage_memory | IO_MEM_SUBPAGE;
        /* NOTE(review): the return values of cpu_register_io_memory
           (can be -1) and subpage_register are not checked here. */
        subpage_register(mmio, 0, TARGET_PAGE_SIZE - 1, orig_memory);
    }

    return mmio;
}
2579db7b5426Sblueswir1 
/* One-time initialization of the I/O memory tables: register the
   fixed ROM/unassigned/not-dirty handlers, the watchpoint handler
   (softmmu only), and allocate the physical RAM dirty bitmap. */
static void io_mem_init(void)
{
    cpu_register_io_memory(IO_MEM_ROM >> IO_MEM_SHIFT, error_mem_read, unassigned_mem_write, NULL);
    cpu_register_io_memory(IO_MEM_UNASSIGNED >> IO_MEM_SHIFT, unassigned_mem_read, unassigned_mem_write, NULL);
    cpu_register_io_memory(IO_MEM_NOTDIRTY >> IO_MEM_SHIFT, error_mem_read, notdirty_mem_write, NULL);
    /* first index available for dynamic registration; entries below 5
       are presumably the reserved IO_MEM_* slots -- confirm against
       the IO_MEM_* definitions */
    io_mem_nb = 5;

#if defined(CONFIG_SOFTMMU)
    /* watchpoint handlers get a dynamically allocated slot */
    io_mem_watch = cpu_register_io_memory(-1, watch_mem_read,
                                          watch_mem_write, NULL);
#endif
    /* alloc dirty bits array */
    phys_ram_dirty = qemu_vmalloc(phys_ram_size >> TARGET_PAGE_BITS);
    /* start with every page marked dirty */
    memset(phys_ram_dirty, 0xff, phys_ram_size >> TARGET_PAGE_BITS);
}
259533417e70Sbellard 
259633417e70Sbellard /* mem_read and mem_write are arrays of functions containing the
259733417e70Sbellard    function to access byte (index 0), word (index 1) and dword (index
25983ee89922Sblueswir1    2). Functions can be omitted with a NULL function pointer. The
25993ee89922Sblueswir1    registered functions may be modified dynamically later.
26003ee89922Sblueswir1    If io_index is non zero, the corresponding io zone is
26014254fab8Sblueswir1    modified. If it is zero, a new io zone is allocated. The return
26024254fab8Sblueswir1    value can be used with cpu_register_physical_memory(). (-1) is
26034254fab8Sblueswir1    returned if error. */
260433417e70Sbellard int cpu_register_io_memory(int io_index,
260533417e70Sbellard                            CPUReadMemoryFunc **mem_read,
2606a4193c8aSbellard                            CPUWriteMemoryFunc **mem_write,
2607a4193c8aSbellard                            void *opaque)
260833417e70Sbellard {
26094254fab8Sblueswir1     int i, subwidth = 0;
261033417e70Sbellard 
261133417e70Sbellard     if (io_index <= 0) {
2612b5ff1b31Sbellard         if (io_mem_nb >= IO_MEM_NB_ENTRIES)
261333417e70Sbellard             return -1;
261433417e70Sbellard         io_index = io_mem_nb++;
261533417e70Sbellard     } else {
261633417e70Sbellard         if (io_index >= IO_MEM_NB_ENTRIES)
261733417e70Sbellard             return -1;
261833417e70Sbellard     }
261933417e70Sbellard 
262033417e70Sbellard     for(i = 0;i < 3; i++) {
26214254fab8Sblueswir1         if (!mem_read[i] || !mem_write[i])
26224254fab8Sblueswir1             subwidth = IO_MEM_SUBWIDTH;
262333417e70Sbellard         io_mem_read[io_index][i] = mem_read[i];
262433417e70Sbellard         io_mem_write[io_index][i] = mem_write[i];
262533417e70Sbellard     }
2626a4193c8aSbellard     io_mem_opaque[io_index] = opaque;
26274254fab8Sblueswir1     return (io_index << IO_MEM_SHIFT) | subwidth;
262833417e70Sbellard }
262961382a50Sbellard 
26308926b517Sbellard CPUWriteMemoryFunc **cpu_get_io_memory_write(int io_index)
26318926b517Sbellard {
26328926b517Sbellard     return io_mem_write[io_index >> IO_MEM_SHIFT];
26338926b517Sbellard }
26348926b517Sbellard 
26358926b517Sbellard CPUReadMemoryFunc **cpu_get_io_memory_read(int io_index)
26368926b517Sbellard {
26378926b517Sbellard     return io_mem_read[io_index >> IO_MEM_SHIFT];
26388926b517Sbellard }
26398926b517Sbellard 
264013eb76e0Sbellard /* physical memory access (slow version, mainly for debug) */
264113eb76e0Sbellard #if defined(CONFIG_USER_ONLY)
/* Copy 'len' bytes between guest memory at 'addr' and the host buffer
   'buf'; 'is_write' selects the direction.  User-mode version: the
   address is checked page by page against the page flags, and the
   copy silently stops early on an invalid or protected page. */
void cpu_physical_memory_rw(target_phys_addr_t addr, uint8_t *buf,
                            int len, int is_write)
{
    int l, flags;
    target_ulong page;
    void * p;

    while (len > 0) {
        page = addr & TARGET_PAGE_MASK;
        /* never cross a page boundary in one step */
        l = (page + TARGET_PAGE_SIZE) - addr;
        if (l > len)
            l = len;
        flags = page_get_flags(page);
        if (!(flags & PAGE_VALID))
            return;
        if (is_write) {
            if (!(flags & PAGE_WRITE))
                return;
            /* XXX: this code should not depend on lock_user */
            if (!(p = lock_user(VERIFY_WRITE, addr, l, 0)))
                /* FIXME - should this return an error rather than just fail? */
                return;
            memcpy(p, buf, l);
            unlock_user(p, addr, l);
        } else {
            if (!(flags & PAGE_READ))
                return;
            /* XXX: this code should not depend on lock_user */
            if (!(p = lock_user(VERIFY_READ, addr, l, 1)))
                /* FIXME - should this return an error rather than just fail? */
                return;
            memcpy(buf, p, l);
            unlock_user(p, addr, 0);
        }
        len -= l;
        buf += l;
        addr += l;
    }
}
26818df1cd07Sbellard 
268213eb76e0Sbellard #else
/* Copy 'len' bytes between guest physical memory at 'addr' and the
   host buffer 'buf'; 'is_write' selects the direction.  RAM pages are
   accessed with memcpy; anything else is forwarded to the registered
   I/O callbacks, split into the widest naturally aligned 32/16/8 bit
   accesses available.  Writes into RAM invalidate translated code on
   the page and update the dirty bitmap. */
void cpu_physical_memory_rw(target_phys_addr_t addr, uint8_t *buf,
                            int len, int is_write)
{
    int l, io_index;
    uint8_t *ptr;
    uint32_t val;
    target_phys_addr_t page;
    unsigned long pd;
    PhysPageDesc *p;

    while (len > 0) {
        page = addr & TARGET_PAGE_MASK;
        /* never cross a page boundary in one step */
        l = (page + TARGET_PAGE_SIZE) - addr;
        if (l > len)
            l = len;
        p = phys_page_find(page >> TARGET_PAGE_BITS);
        if (!p) {
            pd = IO_MEM_UNASSIGNED;
        } else {
            pd = p->phys_offset;
        }

        if (is_write) {
            if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
                /* I/O case: dispatch to the registered write handler */
                io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
                /* XXX: could force cpu_single_env to NULL to avoid
                   potential bugs */
                if (l >= 4 && ((addr & 3) == 0)) {
                    /* 32 bit write access */
                    val = ldl_p(buf);
                    io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
                    l = 4;
                } else if (l >= 2 && ((addr & 1) == 0)) {
                    /* 16 bit write access */
                    val = lduw_p(buf);
                    io_mem_write[io_index][1](io_mem_opaque[io_index], addr, val);
                    l = 2;
                } else {
                    /* 8 bit write access */
                    val = ldub_p(buf);
                    io_mem_write[io_index][0](io_mem_opaque[io_index], addr, val);
                    l = 1;
                }
            } else {
                unsigned long addr1;
                addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
                /* RAM case */
                ptr = phys_ram_base + addr1;
                memcpy(ptr, buf, l);
                if (!cpu_physical_memory_is_dirty(addr1)) {
                    /* invalidate code */
                    tb_invalidate_phys_page_range(addr1, addr1 + l, 0);
                    /* set dirty bit */
                    phys_ram_dirty[addr1 >> TARGET_PAGE_BITS] |=
                        (0xff & ~CODE_DIRTY_FLAG);
                }
            }
        } else {
            if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
                !(pd & IO_MEM_ROMD)) {
                /* I/O case */
                io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
                if (l >= 4 && ((addr & 3) == 0)) {
                    /* 32 bit read access */
                    val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr);
                    stl_p(buf, val);
                    l = 4;
                } else if (l >= 2 && ((addr & 1) == 0)) {
                    /* 16 bit read access */
                    val = io_mem_read[io_index][1](io_mem_opaque[io_index], addr);
                    stw_p(buf, val);
                    l = 2;
                } else {
                    /* 8 bit read access */
                    val = io_mem_read[io_index][0](io_mem_opaque[io_index], addr);
                    stb_p(buf, val);
                    l = 1;
                }
            } else {
                /* RAM case (includes ROM and ROMD-tagged pages) */
                ptr = phys_ram_base + (pd & TARGET_PAGE_MASK) +
                    (addr & ~TARGET_PAGE_MASK);
                memcpy(buf, ptr, l);
            }
        }
        len -= l;
        buf += l;
        addr += l;
    }
}
27738df1cd07Sbellard 
/* used for ROM loading : can write in RAM and ROM */
/* Copy 'len' bytes from 'buf' into guest physical memory at 'addr'.
   Unlike cpu_physical_memory_rw this also writes into ROM pages;
   pages that are neither RAM, ROM nor ROMD-tagged are skipped.
   No dirty bitmap update or TB invalidation is performed here. */
void cpu_physical_memory_write_rom(target_phys_addr_t addr,
                                   const uint8_t *buf, int len)
{
    int l;
    uint8_t *ptr;
    target_phys_addr_t page;
    unsigned long pd;
    PhysPageDesc *p;

    while (len > 0) {
        page = addr & TARGET_PAGE_MASK;
        /* never cross a page boundary in one step */
        l = (page + TARGET_PAGE_SIZE) - addr;
        if (l > len)
            l = len;
        p = phys_page_find(page >> TARGET_PAGE_BITS);
        if (!p) {
            pd = IO_MEM_UNASSIGNED;
        } else {
            pd = p->phys_offset;
        }

        if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM &&
            (pd & ~TARGET_PAGE_MASK) != IO_MEM_ROM &&
            !(pd & IO_MEM_ROMD)) {
            /* do nothing */
        } else {
            unsigned long addr1;
            addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
            /* ROM/RAM case */
            ptr = phys_ram_base + addr1;
            memcpy(ptr, buf, l);
        }
        len -= l;
        buf += l;
        addr += l;
    }
}
2812d0ecd2aaSbellard 
2813d0ecd2aaSbellard 
/* warning: addr must be aligned */
/* Load a 32 bit value from guest physical memory, dispatching to the
   registered I/O read handler for pages that are not RAM/ROM/ROMD. */
uint32_t ldl_phys(target_phys_addr_t addr)
{
    int io_index;
    uint8_t *ptr;
    uint32_t val;
    unsigned long pd;
    PhysPageDesc *p;

    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    if (!p) {
        pd = IO_MEM_UNASSIGNED;
    } else {
        pd = p->phys_offset;
    }

    if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
        !(pd & IO_MEM_ROMD)) {
        /* I/O case */
        io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
        val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr);
    } else {
        /* RAM case */
        ptr = phys_ram_base + (pd & TARGET_PAGE_MASK) +
            (addr & ~TARGET_PAGE_MASK);
        val = ldl_p(ptr);
    }
    return val;
}
28438df1cd07Sbellard 
/* warning: addr must be aligned */
/* Load a 64 bit value from guest physical memory.  For I/O pages the
   access is synthesised from two 32 bit reads, ordered according to
   the target byte order. */
uint64_t ldq_phys(target_phys_addr_t addr)
{
    int io_index;
    uint8_t *ptr;
    uint64_t val;
    unsigned long pd;
    PhysPageDesc *p;

    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    if (!p) {
        pd = IO_MEM_UNASSIGNED;
    } else {
        pd = p->phys_offset;
    }

    if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
        !(pd & IO_MEM_ROMD)) {
        /* I/O case: two 32 bit reads, most significant word first on
           big endian targets */
        io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
#ifdef TARGET_WORDS_BIGENDIAN
        val = (uint64_t)io_mem_read[io_index][2](io_mem_opaque[io_index], addr) << 32;
        val |= io_mem_read[io_index][2](io_mem_opaque[io_index], addr + 4);
#else
        val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr);
        val |= (uint64_t)io_mem_read[io_index][2](io_mem_opaque[io_index], addr + 4) << 32;
#endif
    } else {
        /* RAM case */
        ptr = phys_ram_base + (pd & TARGET_PAGE_MASK) +
            (addr & ~TARGET_PAGE_MASK);
        val = ldq_p(ptr);
    }
    return val;
}
287984b7b8e7Sbellard 
2880aab33094Sbellard /* XXX: optimize */
2881aab33094Sbellard uint32_t ldub_phys(target_phys_addr_t addr)
2882aab33094Sbellard {
2883aab33094Sbellard     uint8_t val;
2884aab33094Sbellard     cpu_physical_memory_read(addr, &val, 1);
2885aab33094Sbellard     return val;
2886aab33094Sbellard }
2887aab33094Sbellard 
2888aab33094Sbellard /* XXX: optimize */
2889aab33094Sbellard uint32_t lduw_phys(target_phys_addr_t addr)
2890aab33094Sbellard {
2891aab33094Sbellard     uint16_t val;
2892aab33094Sbellard     cpu_physical_memory_read(addr, (uint8_t *)&val, 2);
2893aab33094Sbellard     return tswap16(val);
2894aab33094Sbellard }
2895aab33094Sbellard 
/* warning: addr must be aligned. The ram page is not masked as dirty
   and the code inside is not invalidated. It is useful if the dirty
   bits are used to track modified PTEs */
void stl_phys_notdirty(target_phys_addr_t addr, uint32_t val)
{
    int io_index;
    uint8_t *ptr;
    unsigned long pd;
    PhysPageDesc *p;

    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    if (!p) {
        pd = IO_MEM_UNASSIGNED;
    } else {
        pd = p->phys_offset;
    }

    if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
        /* non-RAM page: dispatch to the registered 32 bit write handler */
        io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
        io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
    } else {
        /* RAM: plain store, deliberately without dirty tracking */
        ptr = phys_ram_base + (pd & TARGET_PAGE_MASK) +
            (addr & ~TARGET_PAGE_MASK);
        stl_p(ptr, val);
    }
}
29228df1cd07Sbellard 
/* 64 bit variant of stl_phys_notdirty: store to guest physical memory
   without marking the RAM page dirty or invalidating translated code.
   I/O accesses are synthesised from two 32 bit writes in target byte
   order.  warning: addr must be aligned. */
void stq_phys_notdirty(target_phys_addr_t addr, uint64_t val)
{
    int io_index;
    uint8_t *ptr;
    unsigned long pd;
    PhysPageDesc *p;

    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    if (!p) {
        pd = IO_MEM_UNASSIGNED;
    } else {
        pd = p->phys_offset;
    }

    if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
        io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
#ifdef TARGET_WORDS_BIGENDIAN
        /* most significant word first on big endian targets */
        io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val >> 32);
        io_mem_write[io_index][2](io_mem_opaque[io_index], addr + 4, val);
#else
        io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
        io_mem_write[io_index][2](io_mem_opaque[io_index], addr + 4, val >> 32);
#endif
    } else {
        ptr = phys_ram_base + (pd & TARGET_PAGE_MASK) +
            (addr & ~TARGET_PAGE_MASK);
        stq_p(ptr, val);
    }
}
2952bc98a7efSj_mayer 
/* warning: addr must be aligned */
/* Store a 32 bit value to guest physical memory.  Non-RAM pages go
   through the registered I/O write handler; RAM stores invalidate
   translated code on the page and update the dirty bitmap. */
void stl_phys(target_phys_addr_t addr, uint32_t val)
{
    int io_index;
    uint8_t *ptr;
    unsigned long pd;
    PhysPageDesc *p;

    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    if (!p) {
        pd = IO_MEM_UNASSIGNED;
    } else {
        pd = p->phys_offset;
    }

    if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
        io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
        io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
    } else {
        unsigned long addr1;
        addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
        /* RAM case */
        ptr = phys_ram_base + addr1;
        stl_p(ptr, val);
        if (!cpu_physical_memory_is_dirty(addr1)) {
            /* invalidate code */
            tb_invalidate_phys_page_range(addr1, addr1 + 4, 0);
            /* set dirty bit */
            phys_ram_dirty[addr1 >> TARGET_PAGE_BITS] |=
                (0xff & ~CODE_DIRTY_FLAG);
        }
    }
}
29868df1cd07Sbellard 
2987aab33094Sbellard /* XXX: optimize */
2988aab33094Sbellard void stb_phys(target_phys_addr_t addr, uint32_t val)
2989aab33094Sbellard {
2990aab33094Sbellard     uint8_t v = val;
2991aab33094Sbellard     cpu_physical_memory_write(addr, &v, 1);
2992aab33094Sbellard }
2993aab33094Sbellard 
2994aab33094Sbellard /* XXX: optimize */
2995aab33094Sbellard void stw_phys(target_phys_addr_t addr, uint32_t val)
2996aab33094Sbellard {
2997aab33094Sbellard     uint16_t v = tswap16(val);
2998aab33094Sbellard     cpu_physical_memory_write(addr, (const uint8_t *)&v, 2);
2999aab33094Sbellard }
3000aab33094Sbellard 
3001aab33094Sbellard /* XXX: optimize */
3002aab33094Sbellard void stq_phys(target_phys_addr_t addr, uint64_t val)
3003aab33094Sbellard {
3004aab33094Sbellard     val = tswap64(val);
3005aab33094Sbellard     cpu_physical_memory_write(addr, (const uint8_t *)&val, 8);
3006aab33094Sbellard }
3007aab33094Sbellard 
300813eb76e0Sbellard #endif
300913eb76e0Sbellard 
301013eb76e0Sbellard /* virtual memory access for debug */
3011b448f2f3Sbellard int cpu_memory_rw_debug(CPUState *env, target_ulong addr,
3012b448f2f3Sbellard                         uint8_t *buf, int len, int is_write)
301313eb76e0Sbellard {
301413eb76e0Sbellard     int l;
30159b3c35e0Sj_mayer     target_phys_addr_t phys_addr;
30169b3c35e0Sj_mayer     target_ulong page;
301713eb76e0Sbellard 
301813eb76e0Sbellard     while (len > 0) {
301913eb76e0Sbellard         page = addr & TARGET_PAGE_MASK;
302013eb76e0Sbellard         phys_addr = cpu_get_phys_page_debug(env, page);
302113eb76e0Sbellard         /* if no physical page mapped, return an error */
302213eb76e0Sbellard         if (phys_addr == -1)
302313eb76e0Sbellard             return -1;
302413eb76e0Sbellard         l = (page + TARGET_PAGE_SIZE) - addr;
302513eb76e0Sbellard         if (l > len)
302613eb76e0Sbellard             l = len;
3027b448f2f3Sbellard         cpu_physical_memory_rw(phys_addr + (addr & ~TARGET_PAGE_MASK),
3028b448f2f3Sbellard                                buf, l, is_write);
302913eb76e0Sbellard         len -= l;
303013eb76e0Sbellard         buf += l;
303113eb76e0Sbellard         addr += l;
303213eb76e0Sbellard     }
303313eb76e0Sbellard     return 0;
303413eb76e0Sbellard }
303513eb76e0Sbellard 
/* Print translation buffer statistics (TB count and sizes, cross-page
   TBs, direct jump chaining, flush counters) through the supplied
   fprintf-like callback, then append the TCG backend statistics. */
void dump_exec_info(FILE *f,
                    int (*cpu_fprintf)(FILE *f, const char *fmt, ...))
{
    int i, target_code_size, max_target_code_size;
    int direct_jmp_count, direct_jmp2_count, cross_page;
    TranslationBlock *tb;

    target_code_size = 0;
    max_target_code_size = 0;
    cross_page = 0;
    direct_jmp_count = 0;
    direct_jmp2_count = 0;
    /* scan every translated block and accumulate the statistics */
    for(i = 0; i < nb_tbs; i++) {
        tb = &tbs[i];
        target_code_size += tb->size;
        if (tb->size > max_target_code_size)
            max_target_code_size = tb->size;
        if (tb->page_addr[1] != -1)
            cross_page++;
        /* 0xffff appears to mark an unused jump slot -- TODO confirm
           against the TranslationBlock definition */
        if (tb->tb_next_offset[0] != 0xffff) {
            direct_jmp_count++;
            if (tb->tb_next_offset[1] != 0xffff) {
                direct_jmp2_count++;
            }
        }
    }
    /* XXX: avoid using doubles ? */
    cpu_fprintf(f, "Translation buffer state:\n");
    cpu_fprintf(f, "gen code size       %ld/%ld\n",
                code_gen_ptr - code_gen_buffer, code_gen_buffer_max_size);
    cpu_fprintf(f, "TB count            %d/%d\n",
                nb_tbs, code_gen_max_blocks);
    cpu_fprintf(f, "TB avg target size  %d max=%d bytes\n",
                nb_tbs ? target_code_size / nb_tbs : 0,
                max_target_code_size);
    cpu_fprintf(f, "TB avg host size    %d bytes (expansion ratio: %0.1f)\n",
                nb_tbs ? (code_gen_ptr - code_gen_buffer) / nb_tbs : 0,
                target_code_size ? (double) (code_gen_ptr - code_gen_buffer) / target_code_size : 0);
    cpu_fprintf(f, "cross page TB count %d (%d%%)\n",
            cross_page,
            nb_tbs ? (cross_page * 100) / nb_tbs : 0);
    cpu_fprintf(f, "direct jump count   %d (%d%%) (2 jumps=%d %d%%)\n",
                direct_jmp_count,
                nb_tbs ? (direct_jmp_count * 100) / nb_tbs : 0,
                direct_jmp2_count,
                nb_tbs ? (direct_jmp2_count * 100) / nb_tbs : 0);
    cpu_fprintf(f, "\nStatistics:\n");
    cpu_fprintf(f, "TB flush count      %d\n", tb_flush_count);
    cpu_fprintf(f, "TB invalidate count %d\n", tb_phys_invalidate_count);
    cpu_fprintf(f, "TLB flush count     %d\n", tlb_flush_count);
    tcg_dump_info(f, cpu_fprintf);
}
3088e3db7226Sbellard 
308961382a50Sbellard #if !defined(CONFIG_USER_ONLY)
309061382a50Sbellard 
309161382a50Sbellard #define MMUSUFFIX _cmmu
309261382a50Sbellard #define GETPC() NULL
309361382a50Sbellard #define env cpu_single_env
3094b769d8feSbellard #define SOFTMMU_CODE_ACCESS
309561382a50Sbellard 
309661382a50Sbellard #define SHIFT 0
309761382a50Sbellard #include "softmmu_template.h"
309861382a50Sbellard 
309961382a50Sbellard #define SHIFT 1
310061382a50Sbellard #include "softmmu_template.h"
310161382a50Sbellard 
310261382a50Sbellard #define SHIFT 2
310361382a50Sbellard #include "softmmu_template.h"
310461382a50Sbellard 
310561382a50Sbellard #define SHIFT 3
310661382a50Sbellard #include "softmmu_template.h"
310761382a50Sbellard 
310861382a50Sbellard #undef env
310961382a50Sbellard 
311061382a50Sbellard #endif
3111