xref: /qemu/system/physmem.c (revision 9656f324d25895ec16ebc5eaf624e28a96c1f1be)
154936004Sbellard /*
2fd6ce8f6Sbellard  *  virtual page mapping and translated block handling
354936004Sbellard  *
454936004Sbellard  *  Copyright (c) 2003 Fabrice Bellard
554936004Sbellard  *
654936004Sbellard  * This library is free software; you can redistribute it and/or
754936004Sbellard  * modify it under the terms of the GNU Lesser General Public
854936004Sbellard  * License as published by the Free Software Foundation; either
954936004Sbellard  * version 2 of the License, or (at your option) any later version.
1054936004Sbellard  *
1154936004Sbellard  * This library is distributed in the hope that it will be useful,
1254936004Sbellard  * but WITHOUT ANY WARRANTY; without even the implied warranty of
1354936004Sbellard  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
1454936004Sbellard  * Lesser General Public License for more details.
1554936004Sbellard  *
1654936004Sbellard  * You should have received a copy of the GNU Lesser General Public
1754936004Sbellard  * License along with this library; if not, write to the Free Software
1854936004Sbellard  * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
1954936004Sbellard  */
2067b915a5Sbellard #include "config.h"
21d5a8f07cSbellard #ifdef _WIN32
224fddf62aSths #define WIN32_LEAN_AND_MEAN
23d5a8f07cSbellard #include <windows.h>
24d5a8f07cSbellard #else
25a98d49b1Sbellard #include <sys/types.h>
26d5a8f07cSbellard #include <sys/mman.h>
27d5a8f07cSbellard #endif
2854936004Sbellard #include <stdlib.h>
2954936004Sbellard #include <stdio.h>
3054936004Sbellard #include <stdarg.h>
3154936004Sbellard #include <string.h>
3254936004Sbellard #include <errno.h>
3354936004Sbellard #include <unistd.h>
3454936004Sbellard #include <inttypes.h>
3554936004Sbellard 
366180a181Sbellard #include "cpu.h"
376180a181Sbellard #include "exec-all.h"
38ca10f867Saurel32 #include "qemu-common.h"
39b67d9a52Sbellard #include "tcg.h"
40b3c7724cSpbrook #include "hw/hw.h"
4153a5960aSpbrook #if defined(CONFIG_USER_ONLY)
4253a5960aSpbrook #include <qemu.h>
4353a5960aSpbrook #endif
4454936004Sbellard 
45fd6ce8f6Sbellard //#define DEBUG_TB_INVALIDATE
4666e85a21Sbellard //#define DEBUG_FLUSH
479fa3e853Sbellard //#define DEBUG_TLB
4867d3b957Spbrook //#define DEBUG_UNASSIGNED
49fd6ce8f6Sbellard 
50fd6ce8f6Sbellard /* make various TB consistency checks */
51fd6ce8f6Sbellard //#define DEBUG_TB_CHECK
5298857888Sbellard //#define DEBUG_TLB_CHECK
53fd6ce8f6Sbellard 
541196be37Sths //#define DEBUG_IOPORT
55db7b5426Sblueswir1 //#define DEBUG_SUBPAGE
561196be37Sths 
5799773bd4Spbrook #if !defined(CONFIG_USER_ONLY)
5899773bd4Spbrook /* TB consistency checks only implemented for usermode emulation.  */
5999773bd4Spbrook #undef DEBUG_TB_CHECK
6099773bd4Spbrook #endif
6199773bd4Spbrook 
629fa3e853Sbellard #define SMC_BITMAP_USE_THRESHOLD 10
639fa3e853Sbellard 
649fa3e853Sbellard #define MMAP_AREA_START        0x00000000
659fa3e853Sbellard #define MMAP_AREA_END          0xa8000000
66fd6ce8f6Sbellard 
67108c49b8Sbellard #if defined(TARGET_SPARC64)
68108c49b8Sbellard #define TARGET_PHYS_ADDR_SPACE_BITS 41
695dcb6b91Sblueswir1 #elif defined(TARGET_SPARC)
705dcb6b91Sblueswir1 #define TARGET_PHYS_ADDR_SPACE_BITS 36
71bedb69eaSj_mayer #elif defined(TARGET_ALPHA)
72bedb69eaSj_mayer #define TARGET_PHYS_ADDR_SPACE_BITS 42
73bedb69eaSj_mayer #define TARGET_VIRT_ADDR_SPACE_BITS 42
74108c49b8Sbellard #elif defined(TARGET_PPC64)
75108c49b8Sbellard #define TARGET_PHYS_ADDR_SPACE_BITS 42
7600f82b8aSaurel32 #elif defined(TARGET_X86_64) && !defined(USE_KQEMU)
7700f82b8aSaurel32 #define TARGET_PHYS_ADDR_SPACE_BITS 42
7800f82b8aSaurel32 #elif defined(TARGET_I386) && !defined(USE_KQEMU)
7900f82b8aSaurel32 #define TARGET_PHYS_ADDR_SPACE_BITS 36
80108c49b8Sbellard #else
81108c49b8Sbellard /* Note: for compatibility with kqemu, we use 32 bits for x86_64 */
82108c49b8Sbellard #define TARGET_PHYS_ADDR_SPACE_BITS 32
83108c49b8Sbellard #endif
84108c49b8Sbellard 
85fab94c0eSpbrook TranslationBlock *tbs;
8626a5f13bSbellard int code_gen_max_blocks;
879fa3e853Sbellard TranslationBlock *tb_phys_hash[CODE_GEN_PHYS_HASH_SIZE];
88fd6ce8f6Sbellard int nb_tbs;
89eb51d102Sbellard /* any access to the tbs or the page table must use this lock */
90eb51d102Sbellard spinlock_t tb_lock = SPIN_LOCK_UNLOCKED;
91fd6ce8f6Sbellard 
927cb69caeSbellard uint8_t code_gen_prologue[1024] __attribute__((aligned (32)));
9326a5f13bSbellard uint8_t *code_gen_buffer;
9426a5f13bSbellard unsigned long code_gen_buffer_size;
9526a5f13bSbellard /* threshold to flush the translated code buffer */
9626a5f13bSbellard unsigned long code_gen_buffer_max_size;
97fd6ce8f6Sbellard uint8_t *code_gen_ptr;
98fd6ce8f6Sbellard 
99e2eef170Spbrook #if !defined(CONFIG_USER_ONLY)
10000f82b8aSaurel32 ram_addr_t phys_ram_size;
1019fa3e853Sbellard int phys_ram_fd;
1029fa3e853Sbellard uint8_t *phys_ram_base;
1031ccde1cbSbellard uint8_t *phys_ram_dirty;
104e9a1ab19Sbellard static ram_addr_t phys_ram_alloc_offset = 0;
105e2eef170Spbrook #endif
1069fa3e853Sbellard 
1076a00d601Sbellard CPUState *first_cpu;
1086a00d601Sbellard /* current CPU in the current thread. It is only valid inside
1096a00d601Sbellard    cpu_exec() */
1106a00d601Sbellard CPUState *cpu_single_env;
1112e70f6efSpbrook /* 0 = Do not count executed instructions.
112bf20dc07Sths    1 = Precise instruction counting.
1132e70f6efSpbrook    2 = Adaptive rate instruction counting.  */
1142e70f6efSpbrook int use_icount = 0;
1152e70f6efSpbrook /* Current instruction counter.  While executing translated code this may
1162e70f6efSpbrook    include some instructions that have not yet been executed.  */
1172e70f6efSpbrook int64_t qemu_icount;
1186a00d601Sbellard 
11954936004Sbellard typedef struct PageDesc {
12092e873b9Sbellard     /* list of TBs intersecting this ram page */
121fd6ce8f6Sbellard     TranslationBlock *first_tb;
1229fa3e853Sbellard     /* in order to optimize self modifying code, we count the number
1239fa3e853Sbellard        of lookups we do to a given page to use a bitmap */
1249fa3e853Sbellard     unsigned int code_write_count;
1259fa3e853Sbellard     uint8_t *code_bitmap;
1269fa3e853Sbellard #if defined(CONFIG_USER_ONLY)
1279fa3e853Sbellard     unsigned long flags;
1289fa3e853Sbellard #endif
12954936004Sbellard } PageDesc;
13054936004Sbellard 
13192e873b9Sbellard typedef struct PhysPageDesc {
1320f459d16Spbrook     /* offset in host memory of the page + io_index in the low bits */
13300f82b8aSaurel32     ram_addr_t phys_offset;
13492e873b9Sbellard } PhysPageDesc;
13592e873b9Sbellard 
13654936004Sbellard #define L2_BITS 10
137bedb69eaSj_mayer #if defined(CONFIG_USER_ONLY) && defined(TARGET_VIRT_ADDR_SPACE_BITS)
138bedb69eaSj_mayer /* XXX: this is a temporary hack for alpha target.
139bedb69eaSj_mayer  *      In the future, this is to be replaced by a multi-level table
140bedb69eaSj_mayer  *      to actually be able to handle the complete 64 bits address space.
141bedb69eaSj_mayer  */
142bedb69eaSj_mayer #define L1_BITS (TARGET_VIRT_ADDR_SPACE_BITS - L2_BITS - TARGET_PAGE_BITS)
143bedb69eaSj_mayer #else
14403875444Saurel32 #define L1_BITS (32 - L2_BITS - TARGET_PAGE_BITS)
145bedb69eaSj_mayer #endif
14654936004Sbellard 
14754936004Sbellard #define L1_SIZE (1 << L1_BITS)
14854936004Sbellard #define L2_SIZE (1 << L2_BITS)
14954936004Sbellard 
15083fb7adfSbellard unsigned long qemu_real_host_page_size;
15183fb7adfSbellard unsigned long qemu_host_page_bits;
15283fb7adfSbellard unsigned long qemu_host_page_size;
15383fb7adfSbellard unsigned long qemu_host_page_mask;
15454936004Sbellard 
15592e873b9Sbellard /* XXX: for system emulation, it could just be an array */
15654936004Sbellard static PageDesc *l1_map[L1_SIZE];
1570a962c02Sbellard PhysPageDesc **l1_phys_map;
15854936004Sbellard 
159e2eef170Spbrook #if !defined(CONFIG_USER_ONLY)
160e2eef170Spbrook static void io_mem_init(void);
161e2eef170Spbrook 
16233417e70Sbellard /* io memory support */
16333417e70Sbellard CPUWriteMemoryFunc *io_mem_write[IO_MEM_NB_ENTRIES][4];
16433417e70Sbellard CPUReadMemoryFunc *io_mem_read[IO_MEM_NB_ENTRIES][4];
165a4193c8aSbellard void *io_mem_opaque[IO_MEM_NB_ENTRIES];
16633417e70Sbellard static int io_mem_nb;
1676658ffb8Spbrook static int io_mem_watch;
1686658ffb8Spbrook #endif
16933417e70Sbellard 
17034865134Sbellard /* log support */
17134865134Sbellard char *logfilename = "/tmp/qemu.log";
17234865134Sbellard FILE *logfile;
17334865134Sbellard int loglevel;
174e735b91cSpbrook static int log_append = 0;
17534865134Sbellard 
176e3db7226Sbellard /* statistics */
177e3db7226Sbellard static int tlb_flush_count;
178e3db7226Sbellard static int tb_flush_count;
179e3db7226Sbellard static int tb_phys_invalidate_count;
180e3db7226Sbellard 
181db7b5426Sblueswir1 #define SUBPAGE_IDX(addr) ((addr) & ~TARGET_PAGE_MASK)
182db7b5426Sblueswir1 typedef struct subpage_t {
183db7b5426Sblueswir1     target_phys_addr_t base;
1843ee89922Sblueswir1     CPUReadMemoryFunc **mem_read[TARGET_PAGE_SIZE][4];
1853ee89922Sblueswir1     CPUWriteMemoryFunc **mem_write[TARGET_PAGE_SIZE][4];
1863ee89922Sblueswir1     void *opaque[TARGET_PAGE_SIZE][2][4];
187db7b5426Sblueswir1 } subpage_t;
188db7b5426Sblueswir1 
1897cb69caeSbellard #ifdef _WIN32
1907cb69caeSbellard static void map_exec(void *addr, long size)
1917cb69caeSbellard {
1927cb69caeSbellard     DWORD old_protect;
1937cb69caeSbellard     VirtualProtect(addr, size,
1947cb69caeSbellard                    PAGE_EXECUTE_READWRITE, &old_protect);
1957cb69caeSbellard 
1967cb69caeSbellard }
1977cb69caeSbellard #else
/* Make a memory range executable (POSIX variant).  The range is widened
   to whole host pages, as mprotect() requires page-aligned arguments. */
static void map_exec(void *addr, long size)
{
    unsigned long page_size = getpagesize();
    unsigned long mask = page_size - 1;
    unsigned long first = (unsigned long)addr & ~mask;
    unsigned long last = ((unsigned long)addr + size + mask) & ~mask;

    mprotect((void *)first, last - first,
             PROT_READ | PROT_WRITE | PROT_EXEC);
}
2137cb69caeSbellard #endif
2147cb69caeSbellard 
/* One-time host/guest page-size setup.
   Determines the real host page size, derives the qemu_host_page_*
   globals from it (never smaller than TARGET_PAGE_SIZE), allocates and
   zeroes the first level of the physical page table, and -- in user-mode
   emulation -- marks every region already mapped by the host process
   (per /proc/self/maps) as PAGE_RESERVED so the guest cannot mmap over
   QEMU's own memory. */
static void page_init(void)
{
    /* NOTE: we can always suppose that qemu_host_page_size >=
       TARGET_PAGE_SIZE */
#ifdef _WIN32
    {
        SYSTEM_INFO system_info;
        DWORD old_protect;

        GetSystemInfo(&system_info);
        qemu_real_host_page_size = system_info.dwPageSize;
    }
#else
    qemu_real_host_page_size = getpagesize();
#endif
    /* qemu_host_page_size may have been set on the command line;
       otherwise inherit the real host page size */
    if (qemu_host_page_size == 0)
        qemu_host_page_size = qemu_real_host_page_size;
    if (qemu_host_page_size < TARGET_PAGE_SIZE)
        qemu_host_page_size = TARGET_PAGE_SIZE;
    qemu_host_page_bits = 0;
    while ((1 << qemu_host_page_bits) < qemu_host_page_size)
        qemu_host_page_bits++;
    qemu_host_page_mask = ~(qemu_host_page_size - 1);
    l1_phys_map = qemu_vmalloc(L1_SIZE * sizeof(void *));
    memset(l1_phys_map, 0, L1_SIZE * sizeof(void *));

#if !defined(_WIN32) && defined(CONFIG_USER_ONLY)
    {
        long long startaddr, endaddr;
        FILE *f;
        int n;

        mmap_lock();
        last_brk = (unsigned long)sbrk(0);
        f = fopen("/proc/self/maps", "r");
        if (f) {
            do {
                /* each line starts "start-end ..."; skip the rest */
                n = fscanf (f, "%llx-%llx %*[^\n]\n", &startaddr, &endaddr);
                if (n == 2) {
                    /* clamp to the emulated physical address range
                       before flagging the pages */
                    startaddr = MIN(startaddr,
                                    (1ULL << TARGET_PHYS_ADDR_SPACE_BITS) - 1);
                    endaddr = MIN(endaddr,
                                    (1ULL << TARGET_PHYS_ADDR_SPACE_BITS) - 1);
                    page_set_flags(startaddr & TARGET_PAGE_MASK,
                                   TARGET_PAGE_ALIGN(endaddr),
                                   PAGE_RESERVED);
                }
            } while (!feof(f));
            fclose(f);
        }
        mmap_unlock();
    }
#endif
}
26954936004Sbellard 
27000f82b8aSaurel32 static inline PageDesc *page_find_alloc(target_ulong index)
27154936004Sbellard {
27254936004Sbellard     PageDesc **lp, *p;
27354936004Sbellard 
27417e2377aSpbrook #if TARGET_LONG_BITS > 32
27517e2377aSpbrook     /* Host memory outside guest VM.  For 32-bit targets we have already
27617e2377aSpbrook        excluded high addresses.  */
27717e2377aSpbrook     if (index > ((target_ulong)L2_SIZE * L1_SIZE * TARGET_PAGE_SIZE))
27817e2377aSpbrook         return NULL;
27917e2377aSpbrook #endif
28054936004Sbellard     lp = &l1_map[index >> L2_BITS];
28154936004Sbellard     p = *lp;
28254936004Sbellard     if (!p) {
28354936004Sbellard         /* allocate if not found */
28417e2377aSpbrook #if defined(CONFIG_USER_ONLY)
28517e2377aSpbrook         unsigned long addr;
28617e2377aSpbrook         size_t len = sizeof(PageDesc) * L2_SIZE;
28717e2377aSpbrook         /* Don't use qemu_malloc because it may recurse.  */
28817e2377aSpbrook         p = mmap(0, len, PROT_READ | PROT_WRITE,
28917e2377aSpbrook                  MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
29054936004Sbellard         *lp = p;
29117e2377aSpbrook         addr = h2g(p);
29217e2377aSpbrook         if (addr == (target_ulong)addr) {
29317e2377aSpbrook             page_set_flags(addr & TARGET_PAGE_MASK,
29417e2377aSpbrook                            TARGET_PAGE_ALIGN(addr + len),
29517e2377aSpbrook                            PAGE_RESERVED);
29617e2377aSpbrook         }
29717e2377aSpbrook #else
29817e2377aSpbrook         p = qemu_mallocz(sizeof(PageDesc) * L2_SIZE);
29917e2377aSpbrook         *lp = p;
30017e2377aSpbrook #endif
30154936004Sbellard     }
30254936004Sbellard     return p + (index & (L2_SIZE - 1));
30354936004Sbellard }
30454936004Sbellard 
30500f82b8aSaurel32 static inline PageDesc *page_find(target_ulong index)
30654936004Sbellard {
30754936004Sbellard     PageDesc *p;
30854936004Sbellard 
30954936004Sbellard     p = l1_map[index >> L2_BITS];
31054936004Sbellard     if (!p)
31154936004Sbellard         return 0;
312fd6ce8f6Sbellard     return p + (index & (L2_SIZE - 1));
31354936004Sbellard }
31454936004Sbellard 
/* Return the PhysPageDesc slot for physical page 'index'.
   With 'alloc' nonzero, missing table levels are created on demand and
   every entry of a fresh leaf table starts as IO_MEM_UNASSIGNED; with
   'alloc' zero, NULL is returned for any unmapped index. */
static PhysPageDesc *phys_page_find_alloc(target_phys_addr_t index, int alloc)
{
    void **lp, **p;
    PhysPageDesc *pd;

    p = (void **)l1_phys_map;
#if TARGET_PHYS_ADDR_SPACE_BITS > 32

#if TARGET_PHYS_ADDR_SPACE_BITS > (32 + L1_BITS)
#error unsupported TARGET_PHYS_ADDR_SPACE_BITS
#endif
    /* extra top level only exists for >32-bit physical address spaces */
    lp = p + ((index >> (L1_BITS + L2_BITS)) & (L1_SIZE - 1));
    p = *lp;
    if (!p) {
        /* allocate if not found */
        if (!alloc)
            return NULL;
        p = qemu_vmalloc(sizeof(void *) * L1_SIZE);
        memset(p, 0, sizeof(void *) * L1_SIZE);
        *lp = p;
    }
#endif
    lp = p + ((index >> L2_BITS) & (L1_SIZE - 1));
    pd = *lp;
    if (!pd) {
        int i;
        /* allocate if not found */
        if (!alloc)
            return NULL;
        pd = qemu_vmalloc(sizeof(PhysPageDesc) * L2_SIZE);
        *lp = pd;
        /* new pages have no RAM or MMIO registered yet */
        for (i = 0; i < L2_SIZE; i++)
          pd[i].phys_offset = IO_MEM_UNASSIGNED;
    }
    return ((PhysPageDesc *)pd) + (index & (L2_SIZE - 1));
}
35192e873b9Sbellard 
/* Non-allocating variant of phys_page_find_alloc(): NULL when 'index'
   has no descriptor yet. */
static inline PhysPageDesc *phys_page_find(target_phys_addr_t index)
{
    return phys_page_find_alloc(index, 0);
}
35692e873b9Sbellard 
3579fa3e853Sbellard #if !defined(CONFIG_USER_ONLY)
3586a00d601Sbellard static void tlb_protect_code(ram_addr_t ram_addr);
3593a7d929eSbellard static void tlb_unprotect_code_phys(CPUState *env, ram_addr_t ram_addr,
3603a7d929eSbellard                                     target_ulong vaddr);
361c8a706feSpbrook #define mmap_lock() do { } while(0)
362c8a706feSpbrook #define mmap_unlock() do { } while(0)
3639fa3e853Sbellard #endif
364fd6ce8f6Sbellard 
3654369415fSbellard #define DEFAULT_CODE_GEN_BUFFER_SIZE (32 * 1024 * 1024)
3664369415fSbellard 
3674369415fSbellard #if defined(CONFIG_USER_ONLY)
/* Currently it is not recommended to allocate big chunks of data in
   user mode. It will change when a dedicated libc is used */
3704369415fSbellard #define USE_STATIC_CODE_GEN_BUFFER
3714369415fSbellard #endif
3724369415fSbellard 
3734369415fSbellard #ifdef USE_STATIC_CODE_GEN_BUFFER
3744369415fSbellard static uint8_t static_code_gen_buffer[DEFAULT_CODE_GEN_BUFFER_SIZE];
3754369415fSbellard #endif
3764369415fSbellard 
/* Allocate the buffer that holds generated (translated) host code, plus
   the TranslationBlock descriptor array.  'tb_size' is the requested
   buffer size in bytes; 0 selects a build-dependent default. */
void code_gen_alloc(unsigned long tb_size)
{
#ifdef USE_STATIC_CODE_GEN_BUFFER
    /* user mode: reuse the static buffer, just make it executable */
    code_gen_buffer = static_code_gen_buffer;
    code_gen_buffer_size = DEFAULT_CODE_GEN_BUFFER_SIZE;
    map_exec(code_gen_buffer, code_gen_buffer_size);
#else
    code_gen_buffer_size = tb_size;
    if (code_gen_buffer_size == 0) {
#if defined(CONFIG_USER_ONLY)
        /* in user mode, phys_ram_size is not meaningful */
        code_gen_buffer_size = DEFAULT_CODE_GEN_BUFFER_SIZE;
#else
        /* XXX: needs adjustments */
        code_gen_buffer_size = (int)(phys_ram_size / 4);
#endif
    }
    if (code_gen_buffer_size < MIN_CODE_GEN_BUFFER_SIZE)
        code_gen_buffer_size = MIN_CODE_GEN_BUFFER_SIZE;
    /* The code gen buffer location may have constraints depending on
       the host cpu and OS */
#if defined(__linux__)
    {
        int flags;
        flags = MAP_PRIVATE | MAP_ANONYMOUS;
#if defined(__x86_64__)
        /* keep the buffer in the low 2GB so generated jumps can use
           32-bit displacements */
        flags |= MAP_32BIT;
        /* Cannot map more than that */
        if (code_gen_buffer_size > (800 * 1024 * 1024))
            code_gen_buffer_size = (800 * 1024 * 1024);
#endif
        code_gen_buffer = mmap(NULL, code_gen_buffer_size,
                               PROT_WRITE | PROT_READ | PROT_EXEC,
                               flags, -1, 0);
        if (code_gen_buffer == MAP_FAILED) {
            fprintf(stderr, "Could not allocate dynamic translator buffer\n");
            exit(1);
        }
    }
#else
    code_gen_buffer = qemu_malloc(code_gen_buffer_size);
    if (!code_gen_buffer) {
        fprintf(stderr, "Could not allocate dynamic translator buffer\n");
        exit(1);
    }
    map_exec(code_gen_buffer, code_gen_buffer_size);
#endif
#endif /* !USE_STATIC_CODE_GEN_BUFFER */
    /* the prologue lives outside the buffer; make it executable too */
    map_exec(code_gen_prologue, sizeof(code_gen_prologue));
    /* reserve room for one worst-case TB at the end of the buffer */
    code_gen_buffer_max_size = code_gen_buffer_size -
        code_gen_max_block_size();
    code_gen_max_blocks = code_gen_buffer_size / CODE_GEN_AVG_BLOCK_SIZE;
    tbs = qemu_malloc(code_gen_max_blocks * sizeof(TranslationBlock));
}
43126a5f13bSbellard 
43226a5f13bSbellard /* Must be called before using the QEMU cpus. 'tb_size' is the size
43326a5f13bSbellard    (in bytes) allocated to the translation buffer. Zero means default
43426a5f13bSbellard    size. */
/* Must be called before using the QEMU cpus. 'tb_size' is the size
   (in bytes) allocated to the translation buffer. Zero means default
   size. */
void cpu_exec_init_all(unsigned long tb_size)
{
    cpu_gen_init();
    code_gen_alloc(tb_size);    /* must run before code_gen_ptr is set */
    code_gen_ptr = code_gen_buffer;
    page_init();
#if !defined(CONFIG_USER_ONLY)
    io_mem_init();
#endif
}
44526a5f13bSbellard 
4469656f324Spbrook #if defined(CPU_SAVE_VERSION) && !defined(CONFIG_USER_ONLY)
4479656f324Spbrook 
4489656f324Spbrook #define CPU_COMMON_SAVE_VERSION 1
4499656f324Spbrook 
/* Write the target-independent CPU fields to the savevm stream.
   Field order must stay in sync with cpu_common_load(). */
static void cpu_common_save(QEMUFile *f, void *opaque)
{
    CPUState *env = opaque;

    qemu_put_be32s(f, &env->halted);
    qemu_put_be32s(f, &env->interrupt_request);
}
4579656f324Spbrook 
4589656f324Spbrook static int cpu_common_load(QEMUFile *f, void *opaque, int version_id)
4599656f324Spbrook {
4609656f324Spbrook     CPUState *env = opaque;
4619656f324Spbrook 
4629656f324Spbrook     if (version_id != CPU_COMMON_SAVE_VERSION)
4639656f324Spbrook         return -EINVAL;
4649656f324Spbrook 
4659656f324Spbrook     qemu_get_be32s(f, &env->halted);
4669656f324Spbrook     qemu_put_be32s(f, &env->interrupt_request);
4679656f324Spbrook     tlb_flush(env, 1);
4689656f324Spbrook 
4699656f324Spbrook     return 0;
4709656f324Spbrook }
4719656f324Spbrook #endif
4729656f324Spbrook 
4736a00d601Sbellard void cpu_exec_init(CPUState *env)
474fd6ce8f6Sbellard {
4756a00d601Sbellard     CPUState **penv;
4766a00d601Sbellard     int cpu_index;
4776a00d601Sbellard 
4786a00d601Sbellard     env->next_cpu = NULL;
4796a00d601Sbellard     penv = &first_cpu;
4806a00d601Sbellard     cpu_index = 0;
4816a00d601Sbellard     while (*penv != NULL) {
4826a00d601Sbellard         penv = (CPUState **)&(*penv)->next_cpu;
4836a00d601Sbellard         cpu_index++;
4846a00d601Sbellard     }
4856a00d601Sbellard     env->cpu_index = cpu_index;
4866658ffb8Spbrook     env->nb_watchpoints = 0;
4876a00d601Sbellard     *penv = env;
488b3c7724cSpbrook #if defined(CPU_SAVE_VERSION) && !defined(CONFIG_USER_ONLY)
4899656f324Spbrook     register_savevm("cpu_common", cpu_index, CPU_COMMON_SAVE_VERSION,
4909656f324Spbrook                     cpu_common_save, cpu_common_load, env);
491b3c7724cSpbrook     register_savevm("cpu", cpu_index, CPU_SAVE_VERSION,
492b3c7724cSpbrook                     cpu_save, cpu_load, env);
493b3c7724cSpbrook #endif
494fd6ce8f6Sbellard }
495fd6ce8f6Sbellard 
4969fa3e853Sbellard static inline void invalidate_page_bitmap(PageDesc *p)
4979fa3e853Sbellard {
4989fa3e853Sbellard     if (p->code_bitmap) {
49959817ccbSbellard         qemu_free(p->code_bitmap);
5009fa3e853Sbellard         p->code_bitmap = NULL;
5019fa3e853Sbellard     }
5029fa3e853Sbellard     p->code_write_count = 0;
5039fa3e853Sbellard }
5049fa3e853Sbellard 
505fd6ce8f6Sbellard /* set to NULL all the 'first_tb' fields in all PageDescs */
506fd6ce8f6Sbellard static void page_flush_tb(void)
507fd6ce8f6Sbellard {
508fd6ce8f6Sbellard     int i, j;
509fd6ce8f6Sbellard     PageDesc *p;
510fd6ce8f6Sbellard 
511fd6ce8f6Sbellard     for(i = 0; i < L1_SIZE; i++) {
512fd6ce8f6Sbellard         p = l1_map[i];
513fd6ce8f6Sbellard         if (p) {
5149fa3e853Sbellard             for(j = 0; j < L2_SIZE; j++) {
5159fa3e853Sbellard                 p->first_tb = NULL;
5169fa3e853Sbellard                 invalidate_page_bitmap(p);
5179fa3e853Sbellard                 p++;
5189fa3e853Sbellard             }
519fd6ce8f6Sbellard         }
520fd6ce8f6Sbellard     }
521fd6ce8f6Sbellard }
522fd6ce8f6Sbellard 
523fd6ce8f6Sbellard /* flush all the translation blocks */
524d4e8164fSbellard /* XXX: tb_flush is currently not thread safe */
/* flush all the translation blocks */
/* XXX: tb_flush is currently not thread safe */
void tb_flush(CPUState *env1)
{
    CPUState *env;
#if defined(DEBUG_FLUSH)
    printf("qemu: flush code_size=%ld nb_tbs=%d avg_tb_size=%ld\n",
           (unsigned long)(code_gen_ptr - code_gen_buffer),
           nb_tbs, nb_tbs > 0 ?
           ((unsigned long)(code_gen_ptr - code_gen_buffer)) / nb_tbs : 0);
#endif
    /* generated code must never have outgrown its buffer */
    if ((unsigned long)(code_gen_ptr - code_gen_buffer) > code_gen_buffer_size)
        cpu_abort(env1, "Internal error: code buffer overflow\n");

    nb_tbs = 0;

    /* every CPU's TB lookup cache now points at stale TBs; clear them */
    for(env = first_cpu; env != NULL; env = env->next_cpu) {
        memset (env->tb_jmp_cache, 0, TB_JMP_CACHE_SIZE * sizeof (void *));
    }

    memset (tb_phys_hash, 0, CODE_GEN_PHYS_HASH_SIZE * sizeof (void *));
    page_flush_tb();

    /* start refilling the code buffer from the beginning */
    code_gen_ptr = code_gen_buffer;
    /* XXX: flush processor icache at this point if cache flush is
       expensive */
    tb_flush_count++;
}
551fd6ce8f6Sbellard 
552fd6ce8f6Sbellard #ifdef DEBUG_TB_CHECK
553fd6ce8f6Sbellard 
/* Debug helper: report any registered TB that still overlaps the page
   containing 'address' -- such a TB should have been invalidated. */
static void tb_invalidate_check(target_ulong address)
{
    TranslationBlock *tb;
    int i;
    address &= TARGET_PAGE_MASK;
    for(i = 0;i < CODE_GEN_PHYS_HASH_SIZE; i++) {
        for(tb = tb_phys_hash[i]; tb != NULL; tb = tb->phys_hash_next) {
            /* interval overlap: [pc, pc+size) vs the page at 'address' */
            if (!(address + TARGET_PAGE_SIZE <= tb->pc ||
                  address >= tb->pc + tb->size)) {
                printf("ERROR invalidate: address=%08lx PC=%08lx size=%04x\n",
                       address, (long)tb->pc, tb->size);
            }
        }
    }
}
569fd6ce8f6Sbellard 
570fd6ce8f6Sbellard /* verify that all the pages have correct rights for code */
571fd6ce8f6Sbellard static void tb_page_check(void)
572fd6ce8f6Sbellard {
573fd6ce8f6Sbellard     TranslationBlock *tb;
574fd6ce8f6Sbellard     int i, flags1, flags2;
575fd6ce8f6Sbellard 
57699773bd4Spbrook     for(i = 0;i < CODE_GEN_PHYS_HASH_SIZE; i++) {
57799773bd4Spbrook         for(tb = tb_phys_hash[i]; tb != NULL; tb = tb->phys_hash_next) {
578fd6ce8f6Sbellard             flags1 = page_get_flags(tb->pc);
579fd6ce8f6Sbellard             flags2 = page_get_flags(tb->pc + tb->size - 1);
580fd6ce8f6Sbellard             if ((flags1 & PAGE_WRITE) || (flags2 & PAGE_WRITE)) {
581fd6ce8f6Sbellard                 printf("ERROR page flags: PC=%08lx size=%04x f1=%x f2=%x\n",
58299773bd4Spbrook                        (long)tb->pc, tb->size, flags1, flags2);
583fd6ce8f6Sbellard             }
584fd6ce8f6Sbellard         }
585fd6ce8f6Sbellard     }
586fd6ce8f6Sbellard }
587fd6ce8f6Sbellard 
/* Debug helper: walk the circular jump list starting at tb->jmp_first
   and verify it terminates back at 'tb'.  Links are tagged pointers:
   the low 2 bits select which jmp_next[] slot the chain continues
   through, with tag value 2 marking the list head. */
void tb_jmp_check(TranslationBlock *tb)
{
    TranslationBlock *tb1;
    unsigned int n1;

    /* suppress any remaining jumps to this TB */
    tb1 = tb->jmp_first;
    for(;;) {
        n1 = (long)tb1 & 3;   /* tag: slot index, or 2 = end marker */
        tb1 = (TranslationBlock *)((long)tb1 & ~3);
        if (n1 == 2)
            break;
        tb1 = tb1->jmp_next[n1];
    }
    /* check end of list */
    if (tb1 != tb) {
        printf("ERROR: jmp_list from 0x%08lx\n", (long)tb);
    }
}
607d4e8164fSbellard 
608fd6ce8f6Sbellard #endif
609fd6ce8f6Sbellard 
610fd6ce8f6Sbellard /* invalidate one TB */
/* invalidate one TB */
/* Unlink 'tb' from a singly linked list whose 'next' pointer lives at
   byte offset 'next_offset' inside each TranslationBlock.  The list is
   assumed to contain 'tb' (no termination check). */
static inline void tb_remove(TranslationBlock **ptb, TranslationBlock *tb,
                             int next_offset)
{
    TranslationBlock *tb1;
    for(;;) {
        tb1 = *ptb;
        if (tb1 == tb) {
            /* splice tb out by copying its own 'next' field */
            *ptb = *(TranslationBlock **)((char *)tb1 + next_offset);
            break;
        }
        ptb = (TranslationBlock **)((char *)tb1 + next_offset);
    }
}
624fd6ce8f6Sbellard 
/* Unlink 'tb' from a page's TB list.  Each link is a tagged pointer:
   the low 2 bits select which page_next[] slot of the pointed-to TB the
   chain continues through.  The list is assumed to contain 'tb'. */
static inline void tb_page_remove(TranslationBlock **ptb, TranslationBlock *tb)
{
    TranslationBlock *tb1;
    unsigned int n1;

    for(;;) {
        tb1 = *ptb;
        n1 = (long)tb1 & 3;    /* slot tag of this link */
        tb1 = (TranslationBlock *)((long)tb1 & ~3);
        if (tb1 == tb) {
            *ptb = tb1->page_next[n1];
            break;
        }
        ptb = &tb1->page_next[n1];
    }
}
6419fa3e853Sbellard 
/* Remove jump slot 'n' of 'tb' from the circular list of TBs chained to
   the same target.  Links are tagged pointers: the low 2 bits give the
   jmp_next[] slot the chain continues through (2 = list head).  No-op
   when the slot is already unlinked. */
static inline void tb_jmp_remove(TranslationBlock *tb, int n)
{
    TranslationBlock *tb1, **ptb;
    unsigned int n1;

    ptb = &tb->jmp_next[n];
    tb1 = *ptb;
    if (tb1) {
        /* find tb(n) in circular list */
        for(;;) {
            tb1 = *ptb;
            n1 = (long)tb1 & 3;
            tb1 = (TranslationBlock *)((long)tb1 & ~3);
            if (n1 == n && tb1 == tb)
                break;
            if (n1 == 2) {
                /* reached the head; continue through jmp_first */
                ptb = &tb1->jmp_first;
            } else {
                ptb = &tb1->jmp_next[n1];
            }
        }
        /* now we can suppress tb(n) from the list */
        *ptb = tb->jmp_next[n];

        tb->jmp_next[n] = NULL;
    }
}
669d4e8164fSbellard 
670d4e8164fSbellard /* reset the jump entry 'n' of a TB so that it is not chained to
671d4e8164fSbellard    another TB */
672d4e8164fSbellard static inline void tb_reset_jump(TranslationBlock *tb, int n)
673d4e8164fSbellard {
674d4e8164fSbellard     tb_set_jmp_target(tb, n, (unsigned long)(tb->tc_ptr + tb->tb_next_offset[n]));
675d4e8164fSbellard }
676d4e8164fSbellard 
/* Remove 'tb' from every structure that can reach it: the physical-PC
   hash table, the per-page TB lists, each cpu's tb_jmp_cache, and the
   jump-chaining lists.  'page_addr' is the physical page the caller is
   already handling itself (its list entry is skipped), or -1 to unlink
   from all pages. */
void tb_phys_invalidate(TranslationBlock *tb, target_ulong page_addr)
{
    CPUState *env;
    PageDesc *p;
    unsigned int h, n1;
    target_phys_addr_t phys_pc;
    TranslationBlock *tb1, *tb2;

    /* remove the TB from the physical-PC hash list */
    phys_pc = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
    h = tb_phys_hash_func(phys_pc);
    tb_remove(&tb_phys_hash[h], tb,
              offsetof(TranslationBlock, phys_hash_next));

    /* remove the TB from the page list */
    if (tb->page_addr[0] != page_addr) {
        p = page_find(tb->page_addr[0] >> TARGET_PAGE_BITS);
        tb_page_remove(&p->first_tb, tb);
        invalidate_page_bitmap(p);
    }
    /* a TB may span two pages; page_addr[1] == -1 means it does not */
    if (tb->page_addr[1] != -1 && tb->page_addr[1] != page_addr) {
        p = page_find(tb->page_addr[1] >> TARGET_PAGE_BITS);
        tb_page_remove(&p->first_tb, tb);
        invalidate_page_bitmap(p);
    }

    tb_invalidated_flag = 1;

    /* remove the TB from each cpu's direct-lookup jump cache */
    h = tb_jmp_cache_hash_func(tb->pc);
    for(env = first_cpu; env != NULL; env = env->next_cpu) {
        if (env->tb_jmp_cache[h] == tb)
            env->tb_jmp_cache[h] = NULL;
    }

    /* suppress this TB from the two jump lists */
    tb_jmp_remove(tb, 0);
    tb_jmp_remove(tb, 1);

    /* suppress any remaining jumps to this TB: walk the circular
       jmp_first list (the entry tagged with 2 terminates it) and
       reset each jumping TB's patched branch */
    tb1 = tb->jmp_first;
    for(;;) {
        n1 = (long)tb1 & 3;
        if (n1 == 2)
            break;
        tb1 = (TranslationBlock *)((long)tb1 & ~3);
        tb2 = tb1->jmp_next[n1];
        tb_reset_jump(tb1, n1);
        tb1->jmp_next[n1] = NULL;
        tb1 = tb2;
    }
    tb->jmp_first = (TranslationBlock *)((long)tb | 2); /* fail safe */

    tb_phys_invalidate_count++;
}
7329fa3e853Sbellard 
/* Set 'len' consecutive bits of the bitmap 'tab', starting at bit
   index 'start'.  Bit i of the map lives in byte i>>3, bit i&7. */
static inline void set_bits(uint8_t *tab, int start, int len)
{
    int end = start + len;
    uint8_t *byte = tab + (start >> 3);
    int head_mask = 0xff << (start & 7);

    if ((start & ~7) == (end & ~7)) {
        /* the whole run fits inside a single byte */
        if (start < end)
            *byte |= head_mask & ~(0xff << (end & 7));
        return;
    }
    /* head byte: from 'start' up to the next byte boundary */
    *byte++ |= head_mask;
    start = (start + 8) & ~7;
    /* fully covered middle bytes */
    while (start + 8 <= end) {
        *byte++ = 0xff;
        start += 8;
    }
    /* tail byte, if the run does not end on a byte boundary */
    if (start < end)
        *byte |= ~(0xff << (end & 7));
}
7599fa3e853Sbellard 
/* Build the per-page bitmap marking which bytes of the guest page hold
   translated code; tb_invalidate_phys_page_fast() uses it to filter
   writes that do not overlap any TB.  Silently does nothing if the
   bitmap cannot be allocated. */
static void build_page_bitmap(PageDesc *p)
{
    int n, tb_start, tb_end;
    TranslationBlock *tb;

    p->code_bitmap = qemu_mallocz(TARGET_PAGE_SIZE / 8);
    if (!p->code_bitmap)
        return;

    /* walk the page's TB list; the low 2 bits of each link encode
       which of the TB's two page slots this page occupies */
    tb = p->first_tb;
    while (tb != NULL) {
        n = (long)tb & 3;
        tb = (TranslationBlock *)((long)tb & ~3);
        /* NOTE: this is subtle as a TB may span two physical pages */
        if (n == 0) {
            /* NOTE: tb_end may be after the end of the page, but
               it is not a problem */
            tb_start = tb->pc & ~TARGET_PAGE_MASK;
            tb_end = tb_start + tb->size;
            if (tb_end > TARGET_PAGE_SIZE)
                tb_end = TARGET_PAGE_SIZE;
        } else {
            /* second page of a spanning TB: its code starts at offset 0 */
            tb_start = 0;
            tb_end = ((tb->pc + tb->size) & ~TARGET_PAGE_MASK);
        }
        set_bits(p->code_bitmap, tb_start, tb_end - tb_start);
        tb = tb->page_next[n];
    }
}
7899fa3e853Sbellard 
/* Translate the guest code at 'pc' and register the resulting TB in
   the physical page tables.  If the TB pool or code buffer is full,
   everything is flushed and the allocation retried (it cannot fail the
   second time).  Returns the newly generated TB. */
TranslationBlock *tb_gen_code(CPUState *env,
                              target_ulong pc, target_ulong cs_base,
                              int flags, int cflags)
{
    TranslationBlock *tb;
    uint8_t *tc_ptr;
    target_ulong phys_pc, phys_page2, virt_page2;
    int code_gen_size;

    phys_pc = get_phys_addr_code(env, pc);
    tb = tb_alloc(pc);
    if (!tb) {
        /* flush must be done */
        tb_flush(env);
        /* cannot fail at this point */
        tb = tb_alloc(pc);
        /* Don't forget to invalidate previous TB info.  */
        tb_invalidated_flag = 1;
    }
    /* translate into the shared code buffer at the current position */
    tc_ptr = code_gen_ptr;
    tb->tc_ptr = tc_ptr;
    tb->cs_base = cs_base;
    tb->flags = flags;
    tb->cflags = cflags;
    cpu_gen_code(env, tb, &code_gen_size);
    /* advance the buffer pointer past the generated code, keeping it
       CODE_GEN_ALIGN-aligned */
    code_gen_ptr = (void *)(((unsigned long)code_gen_ptr + code_gen_size + CODE_GEN_ALIGN - 1) & ~(CODE_GEN_ALIGN - 1));

    /* check next page if needed */
    virt_page2 = (pc + tb->size - 1) & TARGET_PAGE_MASK;
    phys_page2 = -1;
    if ((pc & TARGET_PAGE_MASK) != virt_page2) {
        /* the TB spills into a second guest page; link it there too */
        phys_page2 = get_phys_addr_code(env, virt_page2);
    }
    tb_link_phys(tb, phys_pc, phys_page2);
    return tb;
}
826d720b93dSbellard 
/* invalidate all TBs which intersect with the target physical page
   starting in range [start;end[. NOTE: start and end must refer to
   the same physical page. 'is_cpu_write_access' should be true if called
   from a real cpu write access: the virtual CPU will exit the current
   TB if code is modified inside this TB. */
void tb_invalidate_phys_page_range(target_phys_addr_t start, target_phys_addr_t end,
                                   int is_cpu_write_access)
{
    int n, current_tb_modified, current_tb_not_found, current_flags;
    CPUState *env = cpu_single_env;
    PageDesc *p;
    TranslationBlock *tb, *tb_next, *current_tb, *saved_tb;
    target_ulong tb_start, tb_end;
    target_ulong current_pc, current_cs_base;

    p = page_find(start >> TARGET_PAGE_BITS);
    if (!p)
        return;
    /* after enough cpu write faults on this page, build the code
       bitmap so that future small writes can be filtered cheaply */
    if (!p->code_bitmap &&
        ++p->code_write_count >= SMC_BITMAP_USE_THRESHOLD &&
        is_cpu_write_access) {
        /* build code bitmap */
        build_page_bitmap(p);
    }

    /* we remove all the TBs in the range [start, end[ */
    /* XXX: see if in some cases it could be faster to invalidate all the code */
    current_tb_not_found = is_cpu_write_access;
    current_tb_modified = 0;
    current_tb = NULL; /* avoid warning */
    current_pc = 0; /* avoid warning */
    current_cs_base = 0; /* avoid warning */
    current_flags = 0; /* avoid warning */
    tb = p->first_tb;
    while (tb != NULL) {
        /* low 2 bits of the link encode which of the TB's two page
           slots this page occupies */
        n = (long)tb & 3;
        tb = (TranslationBlock *)((long)tb & ~3);
        tb_next = tb->page_next[n];
        /* NOTE: this is subtle as a TB may span two physical pages */
        if (n == 0) {
            /* NOTE: tb_end may be after the end of the page, but
               it is not a problem */
            tb_start = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
            tb_end = tb_start + tb->size;
        } else {
            tb_start = tb->page_addr[1];
            tb_end = tb_start + ((tb->pc + tb->size) & ~TARGET_PAGE_MASK);
        }
        /* only invalidate TBs that actually overlap [start, end[ */
        if (!(tb_end <= start || tb_start >= end)) {
#ifdef TARGET_HAS_PRECISE_SMC
            if (current_tb_not_found) {
                current_tb_not_found = 0;
                current_tb = NULL;
                if (env->mem_io_pc) {
                    /* now we have a real cpu fault */
                    current_tb = tb_find_pc(env->mem_io_pc);
                }
            }
            if (current_tb == tb &&
                (current_tb->cflags & CF_COUNT_MASK) != 1) {
                /* If we are modifying the current TB, we must stop
                its execution. We could be more precise by checking
                that the modification is after the current PC, but it
                would require a specialized function to partially
                restore the CPU state */

                current_tb_modified = 1;
                cpu_restore_state(current_tb, env,
                                  env->mem_io_pc, NULL);
#if defined(TARGET_I386)
                current_flags = env->hflags;
                current_flags |= (env->eflags & (IOPL_MASK | TF_MASK | VM_MASK));
                current_cs_base = (target_ulong)env->segs[R_CS].base;
                current_pc = current_cs_base + env->eip;
#else
#error unsupported CPU
#endif
            }
#endif /* TARGET_HAS_PRECISE_SMC */
            /* we need to do that to handle the case where a signal
               occurs while doing tb_phys_invalidate() */
            saved_tb = NULL;
            if (env) {
                saved_tb = env->current_tb;
                env->current_tb = NULL;
            }
            tb_phys_invalidate(tb, -1);
            if (env) {
                env->current_tb = saved_tb;
                if (env->interrupt_request && env->current_tb)
                    cpu_interrupt(env, env->interrupt_request);
            }
        }
        tb = tb_next;
    }
#if !defined(CONFIG_USER_ONLY)
    /* if no code remaining, no need to continue to use slow writes */
    if (!p->first_tb) {
        invalidate_page_bitmap(p);
        if (is_cpu_write_access) {
            tlb_unprotect_code_phys(env, start, env->mem_io_vaddr);
        }
    }
#endif
#ifdef TARGET_HAS_PRECISE_SMC
    if (current_tb_modified) {
        /* we generate a block containing just the instruction
           modifying the memory. It will ensure that it cannot modify
           itself */
        env->current_tb = NULL;
        tb_gen_code(env, current_pc, current_cs_base, current_flags, 1);
        cpu_resume_from_signal(env, NULL);
    }
#endif
}
9429fa3e853Sbellard 
9439fa3e853Sbellard /* len must be <= 8 and start must be a multiple of len */
94400f82b8aSaurel32 static inline void tb_invalidate_phys_page_fast(target_phys_addr_t start, int len)
9459fa3e853Sbellard {
9469fa3e853Sbellard     PageDesc *p;
9479fa3e853Sbellard     int offset, b;
94859817ccbSbellard #if 0
949a4193c8aSbellard     if (1) {
950a4193c8aSbellard         if (loglevel) {
951a4193c8aSbellard             fprintf(logfile, "modifying code at 0x%x size=%d EIP=%x PC=%08x\n",
9522e70f6efSpbrook                    cpu_single_env->mem_io_vaddr, len,
953a4193c8aSbellard                    cpu_single_env->eip,
954a4193c8aSbellard                    cpu_single_env->eip + (long)cpu_single_env->segs[R_CS].base);
955a4193c8aSbellard         }
95659817ccbSbellard     }
95759817ccbSbellard #endif
9589fa3e853Sbellard     p = page_find(start >> TARGET_PAGE_BITS);
9599fa3e853Sbellard     if (!p)
9609fa3e853Sbellard         return;
9619fa3e853Sbellard     if (p->code_bitmap) {
9629fa3e853Sbellard         offset = start & ~TARGET_PAGE_MASK;
9639fa3e853Sbellard         b = p->code_bitmap[offset >> 3] >> (offset & 7);
9649fa3e853Sbellard         if (b & ((1 << len) - 1))
9659fa3e853Sbellard             goto do_invalidate;
9669fa3e853Sbellard     } else {
9679fa3e853Sbellard     do_invalidate:
968d720b93dSbellard         tb_invalidate_phys_page_range(start, start + len, 1);
9699fa3e853Sbellard     }
9709fa3e853Sbellard }
9719fa3e853Sbellard 
9729fa3e853Sbellard #if !defined(CONFIG_SOFTMMU)
/* Non-softmmu (user-mode) variant: invalidate every TB on the page
   containing 'addr' and clear the page's TB list.  'pc' and 'puc'
   come from the host signal handler; with precise SMC they let us
   restore cpu state when the faulting write came from the TB that is
   currently executing. */
static void tb_invalidate_phys_page(target_phys_addr_t addr,
                                    unsigned long pc, void *puc)
{
    int n, current_flags, current_tb_modified;
    target_ulong current_pc, current_cs_base;
    PageDesc *p;
    TranslationBlock *tb, *current_tb;
#ifdef TARGET_HAS_PRECISE_SMC
    CPUState *env = cpu_single_env;
#endif

    addr &= TARGET_PAGE_MASK;
    p = page_find(addr >> TARGET_PAGE_BITS);
    if (!p)
        return;
    tb = p->first_tb;
    current_tb_modified = 0;
    current_tb = NULL;
    current_pc = 0; /* avoid warning */
    current_cs_base = 0; /* avoid warning */
    current_flags = 0; /* avoid warning */
#ifdef TARGET_HAS_PRECISE_SMC
    if (tb && pc != 0) {
        current_tb = tb_find_pc(pc);
    }
#endif
    while (tb != NULL) {
        /* low 2 bits of the link encode the TB's page slot */
        n = (long)tb & 3;
        tb = (TranslationBlock *)((long)tb & ~3);
#ifdef TARGET_HAS_PRECISE_SMC
        if (current_tb == tb &&
            (current_tb->cflags & CF_COUNT_MASK) != 1) {
                /* If we are modifying the current TB, we must stop
                   its execution. We could be more precise by checking
                   that the modification is after the current PC, but it
                   would require a specialized function to partially
                   restore the CPU state */

            current_tb_modified = 1;
            cpu_restore_state(current_tb, env, pc, puc);
#if defined(TARGET_I386)
            current_flags = env->hflags;
            current_flags |= (env->eflags & (IOPL_MASK | TF_MASK | VM_MASK));
            current_cs_base = (target_ulong)env->segs[R_CS].base;
            current_pc = current_cs_base + env->eip;
#else
#error unsupported CPU
#endif
        }
#endif /* TARGET_HAS_PRECISE_SMC */
        tb_phys_invalidate(tb, addr);
        tb = tb->page_next[n];
    }
    p->first_tb = NULL;
#ifdef TARGET_HAS_PRECISE_SMC
    if (current_tb_modified) {
        /* we generate a block containing just the instruction
           modifying the memory. It will ensure that it cannot modify
           itself */
        env->current_tb = NULL;
        tb_gen_code(env, current_pc, current_cs_base, current_flags, 1);
        cpu_resume_from_signal(env, puc);
    }
#endif
}
10389fa3e853Sbellard #endif
1039fd6ce8f6Sbellard 
/* add the tb in the target page and protect it if necessary */
static inline void tb_alloc_page(TranslationBlock *tb,
                                 unsigned int n, target_ulong page_addr)
{
    PageDesc *p;
    TranslationBlock *last_first_tb;

    tb->page_addr[n] = page_addr;
    p = page_find_alloc(page_addr >> TARGET_PAGE_BITS);
    /* push onto the page's TB list, tagging the link with the page
       slot index 'n' in its low bits */
    tb->page_next[n] = p->first_tb;
    last_first_tb = p->first_tb;
    p->first_tb = (TranslationBlock *)((long)tb | n);
    invalidate_page_bitmap(p);

#if defined(TARGET_HAS_SMC) || 1

#if defined(CONFIG_USER_ONLY)
    if (p->flags & PAGE_WRITE) {
        target_ulong addr;
        PageDesc *p2;
        int prot;

        /* force the host page as non writable (writes will have a
           page fault + mprotect overhead) */
        page_addr &= qemu_host_page_mask;
        prot = 0;
        /* one host page may cover several target pages: accumulate
           their protection bits and clear PAGE_WRITE on each */
        for(addr = page_addr; addr < page_addr + qemu_host_page_size;
            addr += TARGET_PAGE_SIZE) {

            p2 = page_find (addr >> TARGET_PAGE_BITS);
            if (!p2)
                continue;
            prot |= p2->flags;
            p2->flags &= ~PAGE_WRITE;
            page_get_flags(addr);
          }
        mprotect(g2h(page_addr), qemu_host_page_size,
                 (prot & PAGE_BITS) & ~PAGE_WRITE);
#ifdef DEBUG_TB_INVALIDATE
        printf("protecting code page: 0x" TARGET_FMT_lx "\n",
               page_addr);
#endif
    }
#else
    /* if some code is already present, then the pages are already
       protected. So we handle the case where only the first TB is
       allocated in a physical page */
    if (!last_first_tb) {
        tlb_protect_code(page_addr);
    }
#endif

#endif /* TARGET_HAS_SMC */
}
1094fd6ce8f6Sbellard 
1095fd6ce8f6Sbellard /* Allocate a new translation block. Flush the translation buffer if
1096fd6ce8f6Sbellard    too many translation blocks or too much generated code. */
1097c27004ecSbellard TranslationBlock *tb_alloc(target_ulong pc)
1098fd6ce8f6Sbellard {
1099fd6ce8f6Sbellard     TranslationBlock *tb;
1100fd6ce8f6Sbellard 
110126a5f13bSbellard     if (nb_tbs >= code_gen_max_blocks ||
110226a5f13bSbellard         (code_gen_ptr - code_gen_buffer) >= code_gen_buffer_max_size)
1103d4e8164fSbellard         return NULL;
1104fd6ce8f6Sbellard     tb = &tbs[nb_tbs++];
1105fd6ce8f6Sbellard     tb->pc = pc;
1106b448f2f3Sbellard     tb->cflags = 0;
1107d4e8164fSbellard     return tb;
1108d4e8164fSbellard }
1109d4e8164fSbellard 
11102e70f6efSpbrook void tb_free(TranslationBlock *tb)
11112e70f6efSpbrook {
1112bf20dc07Sths     /* In practice this is mostly used for single use temporary TB
11132e70f6efSpbrook        Ignore the hard cases and just back up if this TB happens to
11142e70f6efSpbrook        be the last one generated.  */
11152e70f6efSpbrook     if (nb_tbs > 0 && tb == &tbs[nb_tbs - 1]) {
11162e70f6efSpbrook         code_gen_ptr = tb->tc_ptr;
11172e70f6efSpbrook         nb_tbs--;
11182e70f6efSpbrook     }
11192e70f6efSpbrook }
11202e70f6efSpbrook 
/* add a new TB and link it to the physical page tables. phys_page2 is
   (-1) to indicate that only one page contains the TB. */
void tb_link_phys(TranslationBlock *tb,
                  target_ulong phys_pc, target_ulong phys_page2)
{
    unsigned int h;
    TranslationBlock **ptb;

    /* Grab the mmap lock to stop another thread invalidating this TB
       before we are done.  */
    mmap_lock();
    /* add in the physical hash table */
    h = tb_phys_hash_func(phys_pc);
    ptb = &tb_phys_hash[h];
    tb->phys_hash_next = *ptb;
    *ptb = tb;

    /* add in the page list */
    tb_alloc_page(tb, 0, phys_pc & TARGET_PAGE_MASK);
    if (phys_page2 != -1)
        tb_alloc_page(tb, 1, phys_page2);
    else
        tb->page_addr[1] = -1;

    /* an empty circular incoming-jump list is the TB itself tagged
       with 2 */
    tb->jmp_first = (TranslationBlock *)((long)tb | 2);
    tb->jmp_next[0] = NULL;
    tb->jmp_next[1] = NULL;

    /* init original jump addresses (0xffff marks an unused slot) */
    if (tb->tb_next_offset[0] != 0xffff)
        tb_reset_jump(tb, 0);
    if (tb->tb_next_offset[1] != 0xffff)
        tb_reset_jump(tb, 1);

#ifdef DEBUG_TB_CHECK
    tb_page_check();
#endif
    mmap_unlock();
}
1160fd6ce8f6Sbellard 
1161a513fe19Sbellard /* find the TB 'tb' such that tb[0].tc_ptr <= tc_ptr <
1162a513fe19Sbellard    tb[1].tc_ptr. Return NULL if not found */
1163a513fe19Sbellard TranslationBlock *tb_find_pc(unsigned long tc_ptr)
1164a513fe19Sbellard {
1165a513fe19Sbellard     int m_min, m_max, m;
1166a513fe19Sbellard     unsigned long v;
1167a513fe19Sbellard     TranslationBlock *tb;
1168a513fe19Sbellard 
1169a513fe19Sbellard     if (nb_tbs <= 0)
1170a513fe19Sbellard         return NULL;
1171a513fe19Sbellard     if (tc_ptr < (unsigned long)code_gen_buffer ||
1172a513fe19Sbellard         tc_ptr >= (unsigned long)code_gen_ptr)
1173a513fe19Sbellard         return NULL;
1174a513fe19Sbellard     /* binary search (cf Knuth) */
1175a513fe19Sbellard     m_min = 0;
1176a513fe19Sbellard     m_max = nb_tbs - 1;
1177a513fe19Sbellard     while (m_min <= m_max) {
1178a513fe19Sbellard         m = (m_min + m_max) >> 1;
1179a513fe19Sbellard         tb = &tbs[m];
1180a513fe19Sbellard         v = (unsigned long)tb->tc_ptr;
1181a513fe19Sbellard         if (v == tc_ptr)
1182a513fe19Sbellard             return tb;
1183a513fe19Sbellard         else if (tc_ptr < v) {
1184a513fe19Sbellard             m_max = m - 1;
1185a513fe19Sbellard         } else {
1186a513fe19Sbellard             m_min = m + 1;
1187a513fe19Sbellard         }
1188a513fe19Sbellard     }
1189a513fe19Sbellard     return &tbs[m_max];
1190a513fe19Sbellard }
11917501267eSbellard 
1192ea041c0eSbellard static void tb_reset_jump_recursive(TranslationBlock *tb);
1193ea041c0eSbellard 
/* If tb's jump slot 'n' is chained to another TB, unlink tb from that
   TB's incoming-jump list, reset tb's patched branch, and recurse into
   the jump target so the whole chain is broken. */
static inline void tb_reset_jump_recursive2(TranslationBlock *tb, int n)
{
    TranslationBlock *tb1, *tb_next, **ptb;
    unsigned int n1;

    tb1 = tb->jmp_next[n];
    if (tb1 != NULL) {
        /* find head of list (the entry tagged with 2 is the target TB) */
        for(;;) {
            n1 = (long)tb1 & 3;
            tb1 = (TranslationBlock *)((long)tb1 & ~3);
            if (n1 == 2)
                break;
            tb1 = tb1->jmp_next[n1];
        }
        /* we are now sure now that tb jumps to tb1 */
        tb_next = tb1;

        /* remove tb from the jmp_first list */
        ptb = &tb_next->jmp_first;
        for(;;) {
            tb1 = *ptb;
            n1 = (long)tb1 & 3;
            tb1 = (TranslationBlock *)((long)tb1 & ~3);
            if (n1 == n && tb1 == tb)
                break;
            ptb = &tb1->jmp_next[n1];
        }
        *ptb = tb->jmp_next[n];
        tb->jmp_next[n] = NULL;

        /* suppress the jump to next tb in generated code */
        tb_reset_jump(tb, n);

        /* suppress jumps in the tb on which we could have jumped */
        tb_reset_jump_recursive(tb_next);
    }
}
1232ea041c0eSbellard 
1233ea041c0eSbellard static void tb_reset_jump_recursive(TranslationBlock *tb)
1234ea041c0eSbellard {
1235ea041c0eSbellard     tb_reset_jump_recursive2(tb, 0);
1236ea041c0eSbellard     tb_reset_jump_recursive2(tb, 1);
1237ea041c0eSbellard }
1238ea041c0eSbellard 
12391fddef4bSbellard #if defined(TARGET_HAS_ICE)
1240d720b93dSbellard static void breakpoint_invalidate(CPUState *env, target_ulong pc)
1241d720b93dSbellard {
12429b3c35e0Sj_mayer     target_phys_addr_t addr;
12439b3c35e0Sj_mayer     target_ulong pd;
1244c2f07f81Spbrook     ram_addr_t ram_addr;
1245c2f07f81Spbrook     PhysPageDesc *p;
1246d720b93dSbellard 
1247c2f07f81Spbrook     addr = cpu_get_phys_page_debug(env, pc);
1248c2f07f81Spbrook     p = phys_page_find(addr >> TARGET_PAGE_BITS);
1249c2f07f81Spbrook     if (!p) {
1250c2f07f81Spbrook         pd = IO_MEM_UNASSIGNED;
1251c2f07f81Spbrook     } else {
1252c2f07f81Spbrook         pd = p->phys_offset;
1253c2f07f81Spbrook     }
1254c2f07f81Spbrook     ram_addr = (pd & TARGET_PAGE_MASK) | (pc & ~TARGET_PAGE_MASK);
1255706cd4b5Spbrook     tb_invalidate_phys_page_range(ram_addr, ram_addr + 1, 0);
1256d720b93dSbellard }
1257c27004ecSbellard #endif
1258d720b93dSbellard 
12596658ffb8Spbrook /* Add a watchpoint.  */
12600f459d16Spbrook int cpu_watchpoint_insert(CPUState *env, target_ulong addr, int type)
12616658ffb8Spbrook {
12626658ffb8Spbrook     int i;
12636658ffb8Spbrook 
12646658ffb8Spbrook     for (i = 0; i < env->nb_watchpoints; i++) {
12656658ffb8Spbrook         if (addr == env->watchpoint[i].vaddr)
12666658ffb8Spbrook             return 0;
12676658ffb8Spbrook     }
12686658ffb8Spbrook     if (env->nb_watchpoints >= MAX_WATCHPOINTS)
12696658ffb8Spbrook         return -1;
12706658ffb8Spbrook 
12716658ffb8Spbrook     i = env->nb_watchpoints++;
12726658ffb8Spbrook     env->watchpoint[i].vaddr = addr;
12730f459d16Spbrook     env->watchpoint[i].type = type;
12746658ffb8Spbrook     tlb_flush_page(env, addr);
12756658ffb8Spbrook     /* FIXME: This flush is needed because of the hack to make memory ops
12766658ffb8Spbrook        terminate the TB.  It can be removed once the proper IO trap and
12776658ffb8Spbrook        re-execute bits are in.  */
12786658ffb8Spbrook     tb_flush(env);
12796658ffb8Spbrook     return i;
12806658ffb8Spbrook }
12816658ffb8Spbrook 
12826658ffb8Spbrook /* Remove a watchpoint.  */
12836658ffb8Spbrook int cpu_watchpoint_remove(CPUState *env, target_ulong addr)
12846658ffb8Spbrook {
12856658ffb8Spbrook     int i;
12866658ffb8Spbrook 
12876658ffb8Spbrook     for (i = 0; i < env->nb_watchpoints; i++) {
12886658ffb8Spbrook         if (addr == env->watchpoint[i].vaddr) {
12896658ffb8Spbrook             env->nb_watchpoints--;
12906658ffb8Spbrook             env->watchpoint[i] = env->watchpoint[env->nb_watchpoints];
12916658ffb8Spbrook             tlb_flush_page(env, addr);
12926658ffb8Spbrook             return 0;
12936658ffb8Spbrook         }
12946658ffb8Spbrook     }
12956658ffb8Spbrook     return -1;
12966658ffb8Spbrook }
12976658ffb8Spbrook 
12987d03f82fSedgar_igl /* Remove all watchpoints. */
12997d03f82fSedgar_igl void cpu_watchpoint_remove_all(CPUState *env) {
13007d03f82fSedgar_igl     int i;
13017d03f82fSedgar_igl 
13027d03f82fSedgar_igl     for (i = 0; i < env->nb_watchpoints; i++) {
13037d03f82fSedgar_igl         tlb_flush_page(env, env->watchpoint[i].vaddr);
13047d03f82fSedgar_igl     }
13057d03f82fSedgar_igl     env->nb_watchpoints = 0;
13067d03f82fSedgar_igl }
13077d03f82fSedgar_igl 
/* add a breakpoint. EXCP_DEBUG is returned by the CPU loop if a
   breakpoint is reached */
/* Returns 0 on success (or if pc is already breakpointed), -1 if the
   table is full or the target has no ICE support. */
int cpu_breakpoint_insert(CPUState *env, target_ulong pc)
{
#if defined(TARGET_HAS_ICE)
    int i;

    /* inserting the same pc twice is a no-op */
    for(i = 0; i < env->nb_breakpoints; i++) {
        if (env->breakpoints[i] == pc)
            return 0;
    }

    if (env->nb_breakpoints >= MAX_BREAKPOINTS)
        return -1;
    env->breakpoints[env->nb_breakpoints++] = pc;

    /* drop any translated block containing pc so it is retranslated
       with the breakpoint check included */
    breakpoint_invalidate(env, pc);
    return 0;
#else
    return -1;
#endif
}
13304c3a88a2Sbellard 
/* remove all breakpoints */
void cpu_breakpoint_remove_all(CPUState *env) {
#if defined(TARGET_HAS_ICE)
    int i;
    /* retranslate every breakpointed pc before clearing the table */
    for(i = 0; i < env->nb_breakpoints; i++) {
        breakpoint_invalidate(env, env->breakpoints[i]);
    }
    env->nb_breakpoints = 0;
#endif
}
13417d03f82fSedgar_igl 
13424c3a88a2Sbellard /* remove a breakpoint */
13432e12669aSbellard int cpu_breakpoint_remove(CPUState *env, target_ulong pc)
13444c3a88a2Sbellard {
13451fddef4bSbellard #if defined(TARGET_HAS_ICE)
13464c3a88a2Sbellard     int i;
13474c3a88a2Sbellard     for(i = 0; i < env->nb_breakpoints; i++) {
13484c3a88a2Sbellard         if (env->breakpoints[i] == pc)
13494c3a88a2Sbellard             goto found;
13504c3a88a2Sbellard     }
13514c3a88a2Sbellard     return -1;
13524c3a88a2Sbellard  found:
13534c3a88a2Sbellard     env->nb_breakpoints--;
13541fddef4bSbellard     if (i < env->nb_breakpoints)
13551fddef4bSbellard       env->breakpoints[i] = env->breakpoints[env->nb_breakpoints];
1356d720b93dSbellard 
1357d720b93dSbellard     breakpoint_invalidate(env, pc);
13584c3a88a2Sbellard     return 0;
13594c3a88a2Sbellard #else
13604c3a88a2Sbellard     return -1;
13614c3a88a2Sbellard #endif
13624c3a88a2Sbellard }
13634c3a88a2Sbellard 
/* enable or disable single step mode. EXCP_DEBUG is returned by the
   CPU loop after each instruction */
void cpu_single_step(CPUState *env, int enabled)
{
#if defined(TARGET_HAS_ICE)
    if (env->singlestep_enabled != enabled) {
        env->singlestep_enabled = enabled;
        /* must flush all the translated code to avoid inconsistencies */
        /* XXX: only flush what is necessary */
        tb_flush(env);
    }
#endif
}
1377c33a346eSbellard 
/* enable or disable low levels log */
void cpu_set_log(int log_flags)
{
    loglevel = log_flags;
    /* the log file is opened lazily, on the first transition to a
       non-zero log level; on failure qemu exits immediately */
    if (loglevel && !logfile) {
        logfile = fopen(logfilename, log_append ? "a" : "w");
        if (!logfile) {
            perror(logfilename);
            _exit(1);
        }
#if !defined(CONFIG_SOFTMMU)
        /* must avoid mmap() usage of glibc by setting a buffer "by hand" */
        {
            static uint8_t logfile_buf[4096];
            setvbuf(logfile, logfile_buf, _IOLBF, sizeof(logfile_buf));
        }
#else
        /* line-buffered so the log stays readable while qemu runs */
        setvbuf(logfile, NULL, _IOLBF, 0);
#endif
        /* any later reopen must append rather than truncate */
        log_append = 1;
    }
    /* turning logging off closes the file */
    if (!loglevel && logfile) {
        fclose(logfile);
        logfile = NULL;
    }
}
140434865134Sbellard 
/* Change the log file name and, if logging is active, reopen the log
   under the new name at the current log level. */
void cpu_set_log_filename(const char *filename)
{
    /* NOTE(review): the previous logfilename is never freed and the
       strdup() result is not checked for NULL -- confirm whether the
       default name is a static string before "fixing" the leak. */
    logfilename = strdup(filename);
    if (logfile) {
        fclose(logfile);
        logfile = NULL;
    }
    /* cpu_set_log reopens the file iff loglevel is non-zero */
    cpu_set_log(loglevel);
}
1414c33a346eSbellard 
/* mask must never be zero, except for A20 change call */
/* Raise the interrupt bits in 'mask' on the CPU and kick it out of
   translated code so the request is noticed promptly. */
void cpu_interrupt(CPUState *env, int mask)
{
#if !defined(USE_NPTL)
    TranslationBlock *tb;
    static spinlock_t interrupt_lock = SPIN_LOCK_UNLOCKED;
#endif
    int old_mask;

    /* remember the previously pending bits for the icount sanity check */
    old_mask = env->interrupt_request;
    /* FIXME: This is probably not threadsafe.  A different thread could
       be in the middle of a read-modify-write operation.  */
    env->interrupt_request |= mask;
#if defined(USE_NPTL)
    /* FIXME: TB unchaining isn't SMP safe.  For now just ignore the
       problem and hope the cpu will stop of its own accord.  For userspace
       emulation this often isn't actually as bad as it sounds.  Often
       signals are used primarily to interrupt blocking syscalls.  */
#else
    if (use_icount) {
        /* force the instruction counter to expire immediately so the
           execution loop sees the pending request */
        env->icount_decr.u16.high = 0x8000;
#ifndef CONFIG_USER_ONLY
        /* CPU_INTERRUPT_EXIT isn't a real interrupt.  It just means
           an async event happened and we need to process it.  */
        if (!can_do_io(env)
            && (mask & ~(old_mask | CPU_INTERRUPT_EXIT)) != 0) {
            cpu_abort(env, "Raised interrupt while not in I/O function");
        }
#endif
    } else {
        tb = env->current_tb;
        /* if the cpu is currently executing code, we must unlink it and
           all the potentially executing TB */
        if (tb && !testandset(&interrupt_lock)) {
            env->current_tb = NULL;
            tb_reset_jump_recursive(tb);
            resetlock(&interrupt_lock);
        }
    }
#endif
}
1456ea041c0eSbellard 
/* Clear the given bits from the CPU's pending interrupt mask. */
void cpu_reset_interrupt(CPUState *env, int mask)
{
    env->interrupt_request &= ~mask;
}
1461b54ad049Sbellard 
/* Table mapping each log mask bit to its command-line name and help
   text; terminated by a zero-mask sentinel entry.  Consumed by
   cpu_str_to_log_mask() below. */
CPULogItem cpu_log_items[] = {
    { CPU_LOG_TB_OUT_ASM, "out_asm",
      "show generated host assembly code for each compiled TB" },
    { CPU_LOG_TB_IN_ASM, "in_asm",
      "show target assembly code for each compiled TB" },
    { CPU_LOG_TB_OP, "op",
      "show micro ops for each compiled TB" },
    { CPU_LOG_TB_OP_OPT, "op_opt",
      "show micro ops "
#ifdef TARGET_I386
      "before eflags optimization and "
#endif
      "after liveness analysis" },
    { CPU_LOG_INT, "int",
      "show interrupts/exceptions in short format" },
    { CPU_LOG_EXEC, "exec",
      "show trace before each executed TB (lots of logs)" },
    { CPU_LOG_TB_CPU, "cpu",
      "show CPU state before block translation" },
#ifdef TARGET_I386
    { CPU_LOG_PCALL, "pcall",
      "show protected mode far calls/returns/exceptions" },
#endif
#ifdef DEBUG_IOPORT
    { CPU_LOG_IOPORT, "ioport",
      "show all i/o ports accesses" },
#endif
    { 0, NULL, NULL },
};
1491f193c797Sbellard 
/* Return non-zero iff the n-character prefix of s1 equals the whole
   NUL-terminated string s2 (i.e. s2 has exactly length n and matches). */
static int cmp1(const char *s1, int n, const char *s2)
{
    return strlen(s2) == (size_t)n && memcmp(s1, s2, n) == 0;
}
1498f193c797Sbellard 
1499f193c797Sbellard /* takes a comma separated list of log masks. Return 0 if error. */
1500f193c797Sbellard int cpu_str_to_log_mask(const char *str)
1501f193c797Sbellard {
1502f193c797Sbellard     CPULogItem *item;
1503f193c797Sbellard     int mask;
1504f193c797Sbellard     const char *p, *p1;
1505f193c797Sbellard 
1506f193c797Sbellard     p = str;
1507f193c797Sbellard     mask = 0;
1508f193c797Sbellard     for(;;) {
1509f193c797Sbellard         p1 = strchr(p, ',');
1510f193c797Sbellard         if (!p1)
1511f193c797Sbellard             p1 = p + strlen(p);
15128e3a9fd2Sbellard 	if(cmp1(p,p1-p,"all")) {
15138e3a9fd2Sbellard 		for(item = cpu_log_items; item->mask != 0; item++) {
15148e3a9fd2Sbellard 			mask |= item->mask;
15158e3a9fd2Sbellard 		}
15168e3a9fd2Sbellard 	} else {
1517f193c797Sbellard         for(item = cpu_log_items; item->mask != 0; item++) {
1518f193c797Sbellard             if (cmp1(p, p1 - p, item->name))
1519f193c797Sbellard                 goto found;
1520f193c797Sbellard         }
1521f193c797Sbellard         return 0;
15228e3a9fd2Sbellard 	}
1523f193c797Sbellard     found:
1524f193c797Sbellard         mask |= item->mask;
1525f193c797Sbellard         if (*p1 != ',')
1526f193c797Sbellard             break;
1527f193c797Sbellard         p = p1 + 1;
1528f193c797Sbellard     }
1529f193c797Sbellard     return mask;
1530f193c797Sbellard }
1531ea041c0eSbellard 
/* Print a fatal printf-style message plus the CPU state to stderr
   and, if open, to the log file, then abort().  Never returns. */
void cpu_abort(CPUState *env, const char *fmt, ...)
{
    va_list ap;
    va_list ap2;

    va_start(ap, fmt);
    /* ap is consumed by the stderr pass; ap2 preserves a copy for the
       log file pass */
    va_copy(ap2, ap);
    fprintf(stderr, "qemu: fatal: ");
    vfprintf(stderr, fmt, ap);
    fprintf(stderr, "\n");
#ifdef TARGET_I386
    cpu_dump_state(env, stderr, fprintf, X86_DUMP_FPU | X86_DUMP_CCOP);
#else
    cpu_dump_state(env, stderr, fprintf, 0);
#endif
    if (logfile) {
        fprintf(logfile, "qemu: fatal: ");
        vfprintf(logfile, fmt, ap2);
        fprintf(logfile, "\n");
#ifdef TARGET_I386
        cpu_dump_state(env, logfile, fprintf, X86_DUMP_FPU | X86_DUMP_CCOP);
#else
        cpu_dump_state(env, logfile, fprintf, 0);
#endif
        /* flush and close so the message survives the abort() */
        fflush(logfile);
        fclose(logfile);
    }
    va_end(ap2);
    va_end(ap);
    abort();
}
15637501267eSbellard 
1564c5be9f08Sths CPUState *cpu_copy(CPUState *env)
1565c5be9f08Sths {
156601ba9816Sths     CPUState *new_env = cpu_init(env->cpu_model_str);
1567c5be9f08Sths     /* preserve chaining and index */
1568c5be9f08Sths     CPUState *next_cpu = new_env->next_cpu;
1569c5be9f08Sths     int cpu_index = new_env->cpu_index;
1570c5be9f08Sths     memcpy(new_env, env, sizeof(CPUState));
1571c5be9f08Sths     new_env->next_cpu = next_cpu;
1572c5be9f08Sths     new_env->cpu_index = cpu_index;
1573c5be9f08Sths     return new_env;
1574c5be9f08Sths }
1575c5be9f08Sths 
15760124311eSbellard #if !defined(CONFIG_USER_ONLY)
15770124311eSbellard 
15785c751e99Sedgar_igl static inline void tlb_flush_jmp_cache(CPUState *env, target_ulong addr)
15795c751e99Sedgar_igl {
15805c751e99Sedgar_igl     unsigned int i;
15815c751e99Sedgar_igl 
15825c751e99Sedgar_igl     /* Discard jump cache entries for any tb which might potentially
15835c751e99Sedgar_igl        overlap the flushed page.  */
15845c751e99Sedgar_igl     i = tb_jmp_cache_hash_page(addr - TARGET_PAGE_SIZE);
15855c751e99Sedgar_igl     memset (&env->tb_jmp_cache[i], 0,
15865c751e99Sedgar_igl 	    TB_JMP_PAGE_SIZE * sizeof(TranslationBlock *));
15875c751e99Sedgar_igl 
15885c751e99Sedgar_igl     i = tb_jmp_cache_hash_page(addr);
15895c751e99Sedgar_igl     memset (&env->tb_jmp_cache[i], 0,
15905c751e99Sedgar_igl 	    TB_JMP_PAGE_SIZE * sizeof(TranslationBlock *));
15915c751e99Sedgar_igl }
15925c751e99Sedgar_igl 
/* NOTE: if flush_global is true, also flush global entries (not
   implemented yet) */
/* Invalidate every TLB entry in every MMU mode, plus the jump cache. */
void tlb_flush(CPUState *env, int flush_global)
{
    int i;

#if defined(DEBUG_TLB)
    printf("tlb_flush:\n");
#endif
    /* must reset current TB so that interrupts cannot modify the
       links while we are modifying them */
    env->current_tb = NULL;

    /* -1 is an unmappable address, so every lookup will miss */
    for(i = 0; i < CPU_TLB_SIZE; i++) {
        env->tlb_table[0][i].addr_read = -1;
        env->tlb_table[0][i].addr_write = -1;
        env->tlb_table[0][i].addr_code = -1;
        env->tlb_table[1][i].addr_read = -1;
        env->tlb_table[1][i].addr_write = -1;
        env->tlb_table[1][i].addr_code = -1;
#if (NB_MMU_MODES >= 3)
        env->tlb_table[2][i].addr_read = -1;
        env->tlb_table[2][i].addr_write = -1;
        env->tlb_table[2][i].addr_code = -1;
#if (NB_MMU_MODES == 4)
        env->tlb_table[3][i].addr_read = -1;
        env->tlb_table[3][i].addr_write = -1;
        env->tlb_table[3][i].addr_code = -1;
#endif
#endif
    }

    /* the jump cache caches TB pointers by pc, so it must be dropped
       together with the TLB */
    memset (env->tb_jmp_cache, 0, TB_JMP_CACHE_SIZE * sizeof (void *));

#ifdef USE_KQEMU
    if (env->kqemu_enabled) {
        kqemu_flush(env, flush_global);
    }
#endif
    tlb_flush_count++;
}
163433417e70Sbellard 
1635274da6b2Sbellard static inline void tlb_flush_entry(CPUTLBEntry *tlb_entry, target_ulong addr)
163661382a50Sbellard {
163784b7b8e7Sbellard     if (addr == (tlb_entry->addr_read &
163884b7b8e7Sbellard                  (TARGET_PAGE_MASK | TLB_INVALID_MASK)) ||
163984b7b8e7Sbellard         addr == (tlb_entry->addr_write &
164084b7b8e7Sbellard                  (TARGET_PAGE_MASK | TLB_INVALID_MASK)) ||
164184b7b8e7Sbellard         addr == (tlb_entry->addr_code &
164284b7b8e7Sbellard                  (TARGET_PAGE_MASK | TLB_INVALID_MASK))) {
164384b7b8e7Sbellard         tlb_entry->addr_read = -1;
164484b7b8e7Sbellard         tlb_entry->addr_write = -1;
164584b7b8e7Sbellard         tlb_entry->addr_code = -1;
164684b7b8e7Sbellard     }
164761382a50Sbellard }
164861382a50Sbellard 
/* Invalidate the TLB entries for the page containing addr in every
   MMU mode, plus the matching jump cache lines. */
void tlb_flush_page(CPUState *env, target_ulong addr)
{
    int i;

#if defined(DEBUG_TLB)
    printf("tlb_flush_page: " TARGET_FMT_lx "\n", addr);
#endif
    /* must reset current TB so that interrupts cannot modify the
       links while we are modifying them */
    env->current_tb = NULL;

    /* one entry per page per mode: compute the index once */
    addr &= TARGET_PAGE_MASK;
    i = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
    tlb_flush_entry(&env->tlb_table[0][i], addr);
    tlb_flush_entry(&env->tlb_table[1][i], addr);
#if (NB_MMU_MODES >= 3)
    tlb_flush_entry(&env->tlb_table[2][i], addr);
#if (NB_MMU_MODES == 4)
    tlb_flush_entry(&env->tlb_table[3][i], addr);
#endif
#endif

    tlb_flush_jmp_cache(env, addr);

#ifdef USE_KQEMU
    if (env->kqemu_enabled) {
        kqemu_flush_page(env, addr);
    }
#endif
}
16799fa3e853Sbellard 
/* update the TLBs so that writes to code in the virtual page 'addr'
   can be detected */
static void tlb_protect_code(ram_addr_t ram_addr)
{
    /* clearing CODE_DIRTY_FLAG forces writes to the page back through
       the slow path, where self-modifying code is detected */
    cpu_physical_memory_reset_dirty(ram_addr,
                                    ram_addr + TARGET_PAGE_SIZE,
                                    CODE_DIRTY_FLAG);
}
16889fa3e853Sbellard 
/* update the TLB so that writes in physical page 'phys_addr' are no longer
   tested for self modifying code */
static void tlb_unprotect_code_phys(CPUState *env, ram_addr_t ram_addr,
                                    target_ulong vaddr)
{
    /* setting CODE_DIRTY_FLAG stops the write-fault path from
       re-checking this page for translated code */
    phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] |= CODE_DIRTY_FLAG;
}
16969fa3e853Sbellard 
/* If the entry is a RAM write mapping whose host address falls inside
   [start, start+length), mark it TLB_NOTDIRTY so the next write traps
   and sets the dirty bit again. */
static inline void tlb_reset_dirty_range(CPUTLBEntry *tlb_entry,
                                         unsigned long start, unsigned long length)
{
    unsigned long addr;
    /* only plain RAM entries carry dirty tracking */
    if ((tlb_entry->addr_write & ~TARGET_PAGE_MASK) == IO_MEM_RAM) {
        addr = (tlb_entry->addr_write & TARGET_PAGE_MASK) + tlb_entry->addend;
        /* unsigned subtraction doubles as the range check */
        if ((addr - start) < length) {
            tlb_entry->addr_write = (tlb_entry->addr_write & TARGET_PAGE_MASK) | TLB_NOTDIRTY;
        }
    }
}
17081ccde1cbSbellard 
/* Clear the given dirty flag bits for every page in [start, end) and
   patch every CPU's TLB so the next write to the range traps and
   re-marks the pages dirty. */
void cpu_physical_memory_reset_dirty(ram_addr_t start, ram_addr_t end,
                                     int dirty_flags)
{
    CPUState *env;
    unsigned long length, start1;
    int i, mask, len;
    uint8_t *p;

    /* round the range out to whole pages */
    start &= TARGET_PAGE_MASK;
    end = TARGET_PAGE_ALIGN(end);

    length = end - start;
    if (length == 0)
        return;
    len = length >> TARGET_PAGE_BITS;
#ifdef USE_KQEMU
    /* XXX: should not depend on cpu context */
    env = first_cpu;
    if (env->kqemu_enabled) {
        ram_addr_t addr;
        addr = start;
        for(i = 0; i < len; i++) {
            kqemu_set_notdirty(env, addr);
            addr += TARGET_PAGE_SIZE;
        }
    }
#endif
    /* clear the requested flag bits in the per-page dirty bytes */
    mask = ~dirty_flags;
    p = phys_ram_dirty + (start >> TARGET_PAGE_BITS);
    for(i = 0; i < len; i++)
        p[i] &= mask;

    /* we modify the TLB cache so that the dirty bit will be set again
       when accessing the range */
    start1 = start + (unsigned long)phys_ram_base;
    for(env = first_cpu; env != NULL; env = env->next_cpu) {
        for(i = 0; i < CPU_TLB_SIZE; i++)
            tlb_reset_dirty_range(&env->tlb_table[0][i], start1, length);
        for(i = 0; i < CPU_TLB_SIZE; i++)
            tlb_reset_dirty_range(&env->tlb_table[1][i], start1, length);
#if (NB_MMU_MODES >= 3)
        for(i = 0; i < CPU_TLB_SIZE; i++)
            tlb_reset_dirty_range(&env->tlb_table[2][i], start1, length);
#if (NB_MMU_MODES == 4)
        for(i = 0; i < CPU_TLB_SIZE; i++)
            tlb_reset_dirty_range(&env->tlb_table[3][i], start1, length);
#endif
#endif
    }
}
17591ccde1cbSbellard 
/* Re-add TLB_NOTDIRTY to a RAM write entry whose backing page is no
   longer marked dirty, so the next write goes through the slow path. */
static inline void tlb_update_dirty(CPUTLBEntry *tlb_entry)
{
    ram_addr_t ram_addr;

    if ((tlb_entry->addr_write & ~TARGET_PAGE_MASK) == IO_MEM_RAM) {
        /* recover the ram offset from the host address stored via the
           entry's addend */
        ram_addr = (tlb_entry->addr_write & TARGET_PAGE_MASK) +
            tlb_entry->addend - (unsigned long)phys_ram_base;
        if (!cpu_physical_memory_is_dirty(ram_addr)) {
            tlb_entry->addr_write |= TLB_NOTDIRTY;
        }
    }
}
17723a7d929eSbellard 
/* update the TLB according to the current state of the dirty bits */
void cpu_tlb_update_dirty(CPUState *env)
{
    int i;
    /* scan every entry of every MMU mode */
    for(i = 0; i < CPU_TLB_SIZE; i++)
        tlb_update_dirty(&env->tlb_table[0][i]);
    for(i = 0; i < CPU_TLB_SIZE; i++)
        tlb_update_dirty(&env->tlb_table[1][i]);
#if (NB_MMU_MODES >= 3)
    for(i = 0; i < CPU_TLB_SIZE; i++)
        tlb_update_dirty(&env->tlb_table[2][i]);
#if (NB_MMU_MODES == 4)
    for(i = 0; i < CPU_TLB_SIZE; i++)
        tlb_update_dirty(&env->tlb_table[3][i]);
#endif
#endif
}
17903a7d929eSbellard 
/* Strip TLB_NOTDIRTY from the entry if it maps exactly vaddr, letting
   subsequent writes to the page use the fast path. */
static inline void tlb_set_dirty1(CPUTLBEntry *tlb_entry, target_ulong vaddr)
{
    if (tlb_entry->addr_write == (vaddr | TLB_NOTDIRTY))
        tlb_entry->addr_write = vaddr;
}
17961ccde1cbSbellard 
/* update the TLB corresponding to virtual page vaddr
   so that it is no longer dirty */
static inline void tlb_set_dirty(CPUState *env, target_ulong vaddr)
{
    int i;

    /* one candidate entry per MMU mode at this index */
    vaddr &= TARGET_PAGE_MASK;
    i = (vaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
    tlb_set_dirty1(&env->tlb_table[0][i], vaddr);
    tlb_set_dirty1(&env->tlb_table[1][i], vaddr);
#if (NB_MMU_MODES >= 3)
    tlb_set_dirty1(&env->tlb_table[2][i], vaddr);
#if (NB_MMU_MODES == 4)
    tlb_set_dirty1(&env->tlb_table[3][i], vaddr);
#endif
#endif
}
18141ccde1cbSbellard 
/* add a new TLB entry. At most one entry for a given virtual address
   is permitted. Return 0 if OK or 2 if the page could not be mapped
   (can only happen in non SOFTMMU mode for I/O pages or pages
   conflicting with the host address space). */
int tlb_set_page_exec(CPUState *env, target_ulong vaddr,
                      target_phys_addr_t paddr, int prot,
                      int mmu_idx, int is_softmmu)
{
    PhysPageDesc *p;
    unsigned long pd;
    unsigned int index;
    target_ulong address;
    target_ulong code_address;
    target_phys_addr_t addend;
    int ret;
    CPUTLBEntry *te;
    int i;
    target_phys_addr_t iotlb;

    /* look up the physical page; unmapped addresses fall back to the
       unassigned I/O handler */
    p = phys_page_find(paddr >> TARGET_PAGE_BITS);
    if (!p) {
        pd = IO_MEM_UNASSIGNED;
    } else {
        pd = p->phys_offset;
    }
#if defined(DEBUG_TLB)
    printf("tlb_set_page: vaddr=" TARGET_FMT_lx " paddr=0x%08x prot=%x idx=%d smmu=%d pd=0x%08lx\n",
           vaddr, (int)paddr, prot, mmu_idx, is_softmmu, pd);
#endif

    ret = 0;
    address = vaddr;
    if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM && !(pd & IO_MEM_ROMD)) {
        /* IO memory case (romd handled later) */
        address |= TLB_MMIO;
    }
    addend = (unsigned long)phys_ram_base + (pd & TARGET_PAGE_MASK);
    if ((pd & ~TARGET_PAGE_MASK) <= IO_MEM_ROM) {
        /* Normal RAM.  */
        iotlb = pd & TARGET_PAGE_MASK;
        if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_RAM)
            iotlb |= IO_MEM_NOTDIRTY;
        else
            iotlb |= IO_MEM_ROM;
    } else {
        /* IO handlers are currently passed a physical address.
           It would be nice to pass an offset from the base address
           of that region.  This would avoid having to special case RAM,
           and avoid full address decoding in every device.
           We can't use the high bits of pd for this because
           IO_MEM_ROMD uses these as a ram address.  */
        iotlb = (pd & ~TARGET_PAGE_MASK) + paddr;
    }

    code_address = address;
    /* Make accesses to pages with watchpoints go via the
       watchpoint trap routines.  */
    for (i = 0; i < env->nb_watchpoints; i++) {
        if (vaddr == (env->watchpoint[i].vaddr & TARGET_PAGE_MASK)) {
            iotlb = io_mem_watch + paddr;
            /* TODO: The memory case can be optimized by not trapping
               reads of pages with a write breakpoint.  */
            address |= TLB_MMIO;
        }
    }

    /* fill the entry for this page's slot in the chosen MMU mode */
    index = (vaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
    env->iotlb[mmu_idx][index] = iotlb - vaddr;
    te = &env->tlb_table[mmu_idx][index];
    te->addend = addend - vaddr;
    if (prot & PAGE_READ) {
        te->addr_read = address;
    } else {
        te->addr_read = -1;
    }

    if (prot & PAGE_EXEC) {
        te->addr_code = code_address;
    } else {
        te->addr_code = -1;
    }
    if (prot & PAGE_WRITE) {
        if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_ROM ||
            (pd & IO_MEM_ROMD)) {
            /* Write access calls the I/O callback.  */
            te->addr_write = address | TLB_MMIO;
        } else if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_RAM &&
                   !cpu_physical_memory_is_dirty(pd)) {
            /* clean RAM: trap the first write so the dirty bit gets set */
            te->addr_write = address | TLB_NOTDIRTY;
        } else {
            te->addr_write = address;
        }
    } else {
        te->addr_write = -1;
    }
    return ret;
}
19129fa3e853Sbellard 
19130124311eSbellard #else
19140124311eSbellard 
/* User-mode emulation (CONFIG_USER_ONLY) has no softmmu TLB: no-op. */
void tlb_flush(CPUState *env, int flush_global)
{
}
19180124311eSbellard 
/* User-mode emulation (CONFIG_USER_ONLY) has no softmmu TLB: no-op. */
void tlb_flush_page(CPUState *env, target_ulong addr)
{
}
19220124311eSbellard 
/* User-mode emulation: nothing to install, always succeeds. */
int tlb_set_page_exec(CPUState *env, target_ulong vaddr,
                      target_phys_addr_t paddr, int prot,
                      int mmu_idx, int is_softmmu)
{
    return 0;
}
192933417e70Sbellard 
/* dump memory mappings */
/* Walk the two-level l1_map[]/PageDesc page table and print each maximal
   run of pages with identical protection flags as "start end size rwx".
   The outer loop deliberately runs to i == L1_SIZE inclusive with p
   forced to NULL so the final open region is flushed before returning. */
void page_dump(FILE *f)
{
    unsigned long start, end;
    int i, j, prot, prot1;
    PageDesc *p;

    fprintf(f, "%-8s %-8s %-8s %s\n",
            "start", "end", "size", "prot");
    /* start == -1 means "not currently inside a mapped region". */
    start = -1;
    end = -1;
    prot = 0;
    for(i = 0; i <= L1_SIZE; i++) {
        if (i < L1_SIZE)
            p = l1_map[i];
        else
            p = NULL; /* sentinel pass: closes the last region */
        for(j = 0;j < L2_SIZE; j++) {
            if (!p)
                prot1 = 0; /* missing L2 table => pages unmapped */
            else
                prot1 = p[j].flags;
            if (prot1 != prot) {
                /* Protection changed: emit the region that just ended. */
                end = (i << (32 - L1_BITS)) | (j << TARGET_PAGE_BITS);
                if (start != -1) {
                    fprintf(f, "%08lx-%08lx %08lx %c%c%c\n",
                            start, end, end - start,
                            prot & PAGE_READ ? 'r' : '-',
                            prot & PAGE_WRITE ? 'w' : '-',
                            prot & PAGE_EXEC ? 'x' : '-');
                }
                if (prot1 != 0)
                    start = end;
                else
                    start = -1;
                prot = prot1;
            }
            if (!p)
                break; /* empty L1 slot: no L2 entries to scan */
        }
    }
}
19729fa3e853Sbellard 
197353a5960aSpbrook int page_get_flags(target_ulong address)
19749fa3e853Sbellard {
19759fa3e853Sbellard     PageDesc *p;
19769fa3e853Sbellard 
19779fa3e853Sbellard     p = page_find(address >> TARGET_PAGE_BITS);
19789fa3e853Sbellard     if (!p)
19799fa3e853Sbellard         return 0;
19809fa3e853Sbellard     return p->flags;
19819fa3e853Sbellard }
19829fa3e853Sbellard 
19839fa3e853Sbellard /* modify the flags of a page and invalidate the code if
19849fa3e853Sbellard    necessary. The flag PAGE_WRITE_ORG is positionned automatically
19859fa3e853Sbellard    depending on PAGE_WRITE */
198653a5960aSpbrook void page_set_flags(target_ulong start, target_ulong end, int flags)
19879fa3e853Sbellard {
19889fa3e853Sbellard     PageDesc *p;
198953a5960aSpbrook     target_ulong addr;
19909fa3e853Sbellard 
1991c8a706feSpbrook     /* mmap_lock should already be held.  */
19929fa3e853Sbellard     start = start & TARGET_PAGE_MASK;
19939fa3e853Sbellard     end = TARGET_PAGE_ALIGN(end);
19949fa3e853Sbellard     if (flags & PAGE_WRITE)
19959fa3e853Sbellard         flags |= PAGE_WRITE_ORG;
19969fa3e853Sbellard     for(addr = start; addr < end; addr += TARGET_PAGE_SIZE) {
19979fa3e853Sbellard         p = page_find_alloc(addr >> TARGET_PAGE_BITS);
199817e2377aSpbrook         /* We may be called for host regions that are outside guest
199917e2377aSpbrook            address space.  */
200017e2377aSpbrook         if (!p)
200117e2377aSpbrook             return;
20029fa3e853Sbellard         /* if the write protection is set, then we invalidate the code
20039fa3e853Sbellard            inside */
20049fa3e853Sbellard         if (!(p->flags & PAGE_WRITE) &&
20059fa3e853Sbellard             (flags & PAGE_WRITE) &&
20069fa3e853Sbellard             p->first_tb) {
2007d720b93dSbellard             tb_invalidate_phys_page(addr, 0, NULL);
20089fa3e853Sbellard         }
20099fa3e853Sbellard         p->flags = flags;
20109fa3e853Sbellard     }
20119fa3e853Sbellard }
20129fa3e853Sbellard 
20133d97b40bSths int page_check_range(target_ulong start, target_ulong len, int flags)
20143d97b40bSths {
20153d97b40bSths     PageDesc *p;
20163d97b40bSths     target_ulong end;
20173d97b40bSths     target_ulong addr;
20183d97b40bSths 
20193d97b40bSths     end = TARGET_PAGE_ALIGN(start+len); /* must do before we loose bits in the next step */
20203d97b40bSths     start = start & TARGET_PAGE_MASK;
20213d97b40bSths 
20223d97b40bSths     if( end < start )
20233d97b40bSths         /* we've wrapped around */
20243d97b40bSths         return -1;
20253d97b40bSths     for(addr = start; addr < end; addr += TARGET_PAGE_SIZE) {
20263d97b40bSths         p = page_find(addr >> TARGET_PAGE_BITS);
20273d97b40bSths         if( !p )
20283d97b40bSths             return -1;
20293d97b40bSths         if( !(p->flags & PAGE_VALID) )
20303d97b40bSths             return -1;
20313d97b40bSths 
2032dae3270cSbellard         if ((flags & PAGE_READ) && !(p->flags & PAGE_READ))
20333d97b40bSths             return -1;
2034dae3270cSbellard         if (flags & PAGE_WRITE) {
2035dae3270cSbellard             if (!(p->flags & PAGE_WRITE_ORG))
20363d97b40bSths                 return -1;
2037dae3270cSbellard             /* unprotect the page if it was put read-only because it
2038dae3270cSbellard                contains translated code */
2039dae3270cSbellard             if (!(p->flags & PAGE_WRITE)) {
2040dae3270cSbellard                 if (!page_unprotect(addr, 0, NULL))
2041dae3270cSbellard                     return -1;
2042dae3270cSbellard             }
2043dae3270cSbellard             return 0;
2044dae3270cSbellard         }
20453d97b40bSths     }
20463d97b40bSths     return 0;
20473d97b40bSths }
20483d97b40bSths 
/* called from signal handler: invalidate the code and unprotect the
   page. Return TRUE if the fault was succesfully handled. */
int page_unprotect(target_ulong address, unsigned long pc, void *puc)
{
    unsigned int page_index, prot, pindex;
    PageDesc *p, *p1;
    target_ulong host_start, host_end, addr;

    /* Technically this isn't safe inside a signal handler.  However we
       know this only ever happens in a synchronous SEGV handler, so in
       practice it seems to be ok.  */
    mmap_lock();

    /* Operate on the whole host page: one host page may span several
       target pages, and mprotect works at host-page granularity. */
    host_start = address & qemu_host_page_mask;
    page_index = host_start >> TARGET_PAGE_BITS;
    p1 = page_find(page_index);
    if (!p1) {
        mmap_unlock();
        return 0;
    }
    host_end = host_start + qemu_host_page_size;
    p = p1;
    prot = 0;
    /* OR together the flags of every target page inside the host page. */
    for(addr = host_start;addr < host_end; addr += TARGET_PAGE_SIZE) {
        prot |= p->flags;
        p++;
    }
    /* if the page was really writable, then we change its
       protection back to writable */
    if (prot & PAGE_WRITE_ORG) {
        pindex = (address - host_start) >> TARGET_PAGE_BITS;
        if (!(p1[pindex].flags & PAGE_WRITE)) {
            mprotect((void *)g2h(host_start), qemu_host_page_size,
                     (prot & PAGE_BITS) | PAGE_WRITE);
            p1[pindex].flags |= PAGE_WRITE;
            /* and since the content will be modified, we must invalidate
               the corresponding translated code. */
            tb_invalidate_phys_page(address, pc, puc);
#ifdef DEBUG_TB_CHECK
            tb_invalidate_check(address);
#endif
            mmap_unlock();
            return 1;
        }
    }
    mmap_unlock();
    return 0;
}
20979fa3e853Sbellard 
/* User-mode stub: there is no TLB, so nothing to mark dirty. */
static inline void tlb_set_dirty(CPUState *env,
                                 unsigned long addr, target_ulong vaddr)
{
}
21029fa3e853Sbellard #endif /* defined(CONFIG_USER_ONLY) */
210333417e70Sbellard 
2104e2eef170Spbrook #if !defined(CONFIG_USER_ONLY)
/* Forward declarations for the sub-page (finer than TARGET_PAGE_SIZE)
   I/O dispatch machinery defined later in this file. */
static int subpage_register (subpage_t *mmio, uint32_t start, uint32_t end,
                             ram_addr_t memory);
static void *subpage_init (target_phys_addr_t base, ram_addr_t *phys,
                           ram_addr_t orig_memory);
/* Intersect the page containing 'addr' with the registered region
   [start_addr, start_addr + orig_size): start_addr2/end_addr2 receive
   the in-page byte range, and need_subpage is set to 1 when the region
   covers only part of the page (so a subpage must be created).
   NOTE(review): relies on 'orig_size' being in scope at the expansion
   site — it is not a macro parameter. */
#define CHECK_SUBPAGE(addr, start_addr, start_addr2, end_addr, end_addr2, \
                      need_subpage)                                     \
    do {                                                                \
        if (addr > start_addr)                                          \
            start_addr2 = 0;                                            \
        else {                                                          \
            start_addr2 = start_addr & ~TARGET_PAGE_MASK;               \
            if (start_addr2 > 0)                                        \
                need_subpage = 1;                                       \
        }                                                               \
                                                                        \
        if ((start_addr + orig_size) - addr >= TARGET_PAGE_SIZE)        \
            end_addr2 = TARGET_PAGE_SIZE - 1;                           \
        else {                                                          \
            end_addr2 = (start_addr + orig_size - 1) & ~TARGET_PAGE_MASK; \
            if (end_addr2 < TARGET_PAGE_SIZE - 1)                       \
                need_subpage = 1;                                       \
        }                                                               \
    } while (0)
2128db7b5426Sblueswir1 
/* register physical memory. 'size' must be a multiple of the target
   page size. If (phys_offset & ~TARGET_PAGE_MASK) != 0, then it is an
   io memory page */
void cpu_register_physical_memory(target_phys_addr_t start_addr,
                                  ram_addr_t size,
                                  ram_addr_t phys_offset)
{
    target_phys_addr_t addr, end_addr;
    PhysPageDesc *p;
    CPUState *env;
    ram_addr_t orig_size = size;  /* unrounded size, used by CHECK_SUBPAGE */
    void *subpage;

#ifdef USE_KQEMU
    /* XXX: should not depend on cpu context */
    env = first_cpu;
    if (env->kqemu_enabled) {
        kqemu_set_phys_mem(start_addr, size, phys_offset);
    }
#endif
    /* Round the size up to whole target pages. */
    size = (size + TARGET_PAGE_SIZE - 1) & TARGET_PAGE_MASK;
    end_addr = start_addr + (target_phys_addr_t)size;
    for(addr = start_addr; addr != end_addr; addr += TARGET_PAGE_SIZE) {
        p = phys_page_find(addr >> TARGET_PAGE_BITS);
        if (p && p->phys_offset != IO_MEM_UNASSIGNED) {
            /* Page already mapped: may need to split it into a subpage. */
            ram_addr_t orig_memory = p->phys_offset;
            target_phys_addr_t start_addr2, end_addr2;
            int need_subpage = 0;

            CHECK_SUBPAGE(addr, start_addr, start_addr2, end_addr, end_addr2,
                          need_subpage);
            if (need_subpage || phys_offset & IO_MEM_SUBWIDTH) {
                if (!(orig_memory & IO_MEM_SUBPAGE)) {
                    /* Convert the existing full-page mapping into a
                       subpage so part of it can be overridden. */
                    subpage = subpage_init((addr & TARGET_PAGE_MASK),
                                           &p->phys_offset, orig_memory);
                } else {
                    /* Already a subpage: recover its opaque state. */
                    subpage = io_mem_opaque[(orig_memory & ~TARGET_PAGE_MASK)
                                            >> IO_MEM_SHIFT];
                }
                subpage_register(subpage, start_addr2, end_addr2, phys_offset);
            } else {
                p->phys_offset = phys_offset;
                /* RAM/ROM(D) pages: advance phys_offset so each page
                   points at its own backing ram page. */
                if ((phys_offset & ~TARGET_PAGE_MASK) <= IO_MEM_ROM ||
                    (phys_offset & IO_MEM_ROMD))
                    phys_offset += TARGET_PAGE_SIZE;
            }
        } else {
            /* Previously unassigned page: allocate a descriptor. */
            p = phys_page_find_alloc(addr >> TARGET_PAGE_BITS, 1);
            p->phys_offset = phys_offset;
            if ((phys_offset & ~TARGET_PAGE_MASK) <= IO_MEM_ROM ||
                (phys_offset & IO_MEM_ROMD))
                phys_offset += TARGET_PAGE_SIZE;
            else {
                target_phys_addr_t start_addr2, end_addr2;
                int need_subpage = 0;

                CHECK_SUBPAGE(addr, start_addr, start_addr2, end_addr,
                              end_addr2, need_subpage);

                if (need_subpage || phys_offset & IO_MEM_SUBWIDTH) {
                    /* Partial-page I/O mapping over unassigned memory. */
                    subpage = subpage_init((addr & TARGET_PAGE_MASK),
                                           &p->phys_offset, IO_MEM_UNASSIGNED);
                    subpage_register(subpage, start_addr2, end_addr2,
                                     phys_offset);
                }
            }
        }
    }

    /* since each CPU stores ram addresses in its TLB cache, we must
       reset the modified entries */
    /* XXX: slow ! */
    for(env = first_cpu; env != NULL; env = env->next_cpu) {
        tlb_flush(env, 1);
    }
}
220533417e70Sbellard 
2206ba863458Sbellard /* XXX: temporary until new memory mapping API */
220700f82b8aSaurel32 ram_addr_t cpu_get_physical_page_desc(target_phys_addr_t addr)
2208ba863458Sbellard {
2209ba863458Sbellard     PhysPageDesc *p;
2210ba863458Sbellard 
2211ba863458Sbellard     p = phys_page_find(addr >> TARGET_PAGE_BITS);
2212ba863458Sbellard     if (!p)
2213ba863458Sbellard         return IO_MEM_UNASSIGNED;
2214ba863458Sbellard     return p->phys_offset;
2215ba863458Sbellard }
2216ba863458Sbellard 
2217e9a1ab19Sbellard /* XXX: better than nothing */
221800f82b8aSaurel32 ram_addr_t qemu_ram_alloc(ram_addr_t size)
2219e9a1ab19Sbellard {
2220e9a1ab19Sbellard     ram_addr_t addr;
22217fb4fdcfSbalrog     if ((phys_ram_alloc_offset + size) > phys_ram_size) {
2222ed441467Sbellard         fprintf(stderr, "Not enough memory (requested_size = %" PRIu64 ", max memory = %" PRIu64 "\n",
2223ed441467Sbellard                 (uint64_t)size, (uint64_t)phys_ram_size);
2224e9a1ab19Sbellard         abort();
2225e9a1ab19Sbellard     }
2226e9a1ab19Sbellard     addr = phys_ram_alloc_offset;
2227e9a1ab19Sbellard     phys_ram_alloc_offset = TARGET_PAGE_ALIGN(phys_ram_alloc_offset + size);
2228e9a1ab19Sbellard     return addr;
2229e9a1ab19Sbellard }
2230e9a1ab19Sbellard 
/* Intentionally a no-op: the bump allocator above cannot reclaim
   individual allocations. */
void qemu_ram_free(ram_addr_t addr)
{
}
2234e9a1ab19Sbellard 
2235a4193c8aSbellard static uint32_t unassigned_mem_readb(void *opaque, target_phys_addr_t addr)
223633417e70Sbellard {
223767d3b957Spbrook #ifdef DEBUG_UNASSIGNED
2238ab3d1727Sblueswir1     printf("Unassigned mem read " TARGET_FMT_plx "\n", addr);
223967d3b957Spbrook #endif
2240b4f0a316Sblueswir1 #ifdef TARGET_SPARC
22416c36d3faSblueswir1     do_unassigned_access(addr, 0, 0, 0);
2242f1ccf904Sths #elif TARGET_CRIS
2243f1ccf904Sths     do_unassigned_access(addr, 0, 0, 0);
2244b4f0a316Sblueswir1 #endif
224533417e70Sbellard     return 0;
224633417e70Sbellard }
224733417e70Sbellard 
2248a4193c8aSbellard static void unassigned_mem_writeb(void *opaque, target_phys_addr_t addr, uint32_t val)
224933417e70Sbellard {
225067d3b957Spbrook #ifdef DEBUG_UNASSIGNED
2251ab3d1727Sblueswir1     printf("Unassigned mem write " TARGET_FMT_plx " = 0x%x\n", addr, val);
225267d3b957Spbrook #endif
2253b4f0a316Sblueswir1 #ifdef TARGET_SPARC
22546c36d3faSblueswir1     do_unassigned_access(addr, 1, 0, 0);
2255f1ccf904Sths #elif TARGET_CRIS
2256f1ccf904Sths     do_unassigned_access(addr, 1, 0, 0);
2257b4f0a316Sblueswir1 #endif
225833417e70Sbellard }
225933417e70Sbellard 
/* b/w/l read dispatch: all widths share the byte handler since reads
   of unassigned memory always yield 0. */
static CPUReadMemoryFunc *unassigned_mem_read[3] = {
    unassigned_mem_readb,
    unassigned_mem_readb,
    unassigned_mem_readb,
};
226533417e70Sbellard 
/* b/w/l write dispatch: all widths share the byte handler since writes
   to unassigned memory are simply discarded. */
static CPUWriteMemoryFunc *unassigned_mem_write[3] = {
    unassigned_mem_writeb,
    unassigned_mem_writeb,
    unassigned_mem_writeb,
};
227133417e70Sbellard 
/* Slow-path byte store to a RAM page still marked clean: invalidate any
   translated code on the page, perform the store, then update the dirty
   bitmap so later writes can use the fast path. */
static void notdirty_mem_writeb(void *opaque, target_phys_addr_t ram_addr,
                                uint32_t val)
{
    int dirty_flags;
    dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
    if (!(dirty_flags & CODE_DIRTY_FLAG)) {
        /* Page may hold translated code: flush it before the store and
           reload the flags, which the invalidation may have changed. */
#if !defined(CONFIG_USER_ONLY)
        tb_invalidate_phys_page_fast(ram_addr, 1);
        dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
#endif
    }
    stb_p(phys_ram_base + ram_addr, val);
#ifdef USE_KQEMU
    if (cpu_single_env->kqemu_enabled &&
        (dirty_flags & KQEMU_MODIFY_PAGE_MASK) != KQEMU_MODIFY_PAGE_MASK)
        kqemu_modify_page(cpu_single_env, ram_addr);
#endif
    /* Mark the page dirty for everything except code translation. */
    dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
    phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] = dirty_flags;
    /* we remove the notdirty callback only if the code has been
       flushed */
    if (dirty_flags == 0xff)
        tlb_set_dirty(cpu_single_env, cpu_single_env->mem_io_vaddr);
}
22961ccde1cbSbellard 
/* 16-bit variant of notdirty_mem_writeb: invalidate translated code,
   store, then mark the page dirty. */
static void notdirty_mem_writew(void *opaque, target_phys_addr_t ram_addr,
                                uint32_t val)
{
    int dirty_flags;
    dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
    if (!(dirty_flags & CODE_DIRTY_FLAG)) {
        /* Invalidate before the store; reload possibly-updated flags. */
#if !defined(CONFIG_USER_ONLY)
        tb_invalidate_phys_page_fast(ram_addr, 2);
        dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
#endif
    }
    stw_p(phys_ram_base + ram_addr, val);
#ifdef USE_KQEMU
    if (cpu_single_env->kqemu_enabled &&
        (dirty_flags & KQEMU_MODIFY_PAGE_MASK) != KQEMU_MODIFY_PAGE_MASK)
        kqemu_modify_page(cpu_single_env, ram_addr);
#endif
    /* Mark the page dirty for everything except code translation. */
    dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
    phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] = dirty_flags;
    /* we remove the notdirty callback only if the code has been
       flushed */
    if (dirty_flags == 0xff)
        tlb_set_dirty(cpu_single_env, cpu_single_env->mem_io_vaddr);
}
23211ccde1cbSbellard 
/* 32-bit variant of notdirty_mem_writeb: invalidate translated code,
   store, then mark the page dirty. */
static void notdirty_mem_writel(void *opaque, target_phys_addr_t ram_addr,
                                uint32_t val)
{
    int dirty_flags;
    dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
    if (!(dirty_flags & CODE_DIRTY_FLAG)) {
        /* Invalidate before the store; reload possibly-updated flags. */
#if !defined(CONFIG_USER_ONLY)
        tb_invalidate_phys_page_fast(ram_addr, 4);
        dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
#endif
    }
    stl_p(phys_ram_base + ram_addr, val);
#ifdef USE_KQEMU
    if (cpu_single_env->kqemu_enabled &&
        (dirty_flags & KQEMU_MODIFY_PAGE_MASK) != KQEMU_MODIFY_PAGE_MASK)
        kqemu_modify_page(cpu_single_env, ram_addr);
#endif
    /* Mark the page dirty for everything except code translation. */
    dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
    phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] = dirty_flags;
    /* we remove the notdirty callback only if the code has been
       flushed */
    if (dirty_flags == 0xff)
        tlb_set_dirty(cpu_single_env, cpu_single_env->mem_io_vaddr);
}
23461ccde1cbSbellard 
/* Placeholder read table for the notdirty I/O slot: reads never go
   through it (only writes are trapped), so entries stay NULL. */
static CPUReadMemoryFunc *error_mem_read[3] = {
    NULL, /* never used */
    NULL, /* never used */
    NULL, /* never used */
};
23523a7d929eSbellard 
/* b/w/l write dispatch for clean RAM pages (dirty-tracking slow path). */
static CPUWriteMemoryFunc *notdirty_mem_write[3] = {
    notdirty_mem_writeb,
    notdirty_mem_writew,
    notdirty_mem_writel,
};
23581ccde1cbSbellard 
23590f459d16Spbrook /* Generate a debug exception if a watchpoint has been hit.  */
23600f459d16Spbrook static void check_watchpoint(int offset, int flags)
23610f459d16Spbrook {
23620f459d16Spbrook     CPUState *env = cpu_single_env;
23630f459d16Spbrook     target_ulong vaddr;
23640f459d16Spbrook     int i;
23650f459d16Spbrook 
23662e70f6efSpbrook     vaddr = (env->mem_io_vaddr & TARGET_PAGE_MASK) + offset;
23670f459d16Spbrook     for (i = 0; i < env->nb_watchpoints; i++) {
23680f459d16Spbrook         if (vaddr == env->watchpoint[i].vaddr
23690f459d16Spbrook                 && (env->watchpoint[i].type & flags)) {
23700f459d16Spbrook             env->watchpoint_hit = i + 1;
23710f459d16Spbrook             cpu_interrupt(env, CPU_INTERRUPT_DEBUG);
23720f459d16Spbrook             break;
23730f459d16Spbrook         }
23740f459d16Spbrook     }
23750f459d16Spbrook }
23760f459d16Spbrook 
23776658ffb8Spbrook /* Watchpoint access routines.  Watchpoints are inserted using TLB tricks,
23786658ffb8Spbrook    so these check for a hit then pass through to the normal out-of-line
23796658ffb8Spbrook    phys routines.  */
23806658ffb8Spbrook static uint32_t watch_mem_readb(void *opaque, target_phys_addr_t addr)
23816658ffb8Spbrook {
23820f459d16Spbrook     check_watchpoint(addr & ~TARGET_PAGE_MASK, PAGE_READ);
23836658ffb8Spbrook     return ldub_phys(addr);
23846658ffb8Spbrook }
23856658ffb8Spbrook 
23866658ffb8Spbrook static uint32_t watch_mem_readw(void *opaque, target_phys_addr_t addr)
23876658ffb8Spbrook {
23880f459d16Spbrook     check_watchpoint(addr & ~TARGET_PAGE_MASK, PAGE_READ);
23896658ffb8Spbrook     return lduw_phys(addr);
23906658ffb8Spbrook }
23916658ffb8Spbrook 
23926658ffb8Spbrook static uint32_t watch_mem_readl(void *opaque, target_phys_addr_t addr)
23936658ffb8Spbrook {
23940f459d16Spbrook     check_watchpoint(addr & ~TARGET_PAGE_MASK, PAGE_READ);
23956658ffb8Spbrook     return ldl_phys(addr);
23966658ffb8Spbrook }
23976658ffb8Spbrook 
23986658ffb8Spbrook static void watch_mem_writeb(void *opaque, target_phys_addr_t addr,
23996658ffb8Spbrook                              uint32_t val)
24006658ffb8Spbrook {
24010f459d16Spbrook     check_watchpoint(addr & ~TARGET_PAGE_MASK, PAGE_WRITE);
24026658ffb8Spbrook     stb_phys(addr, val);
24036658ffb8Spbrook }
24046658ffb8Spbrook 
24056658ffb8Spbrook static void watch_mem_writew(void *opaque, target_phys_addr_t addr,
24066658ffb8Spbrook                              uint32_t val)
24076658ffb8Spbrook {
24080f459d16Spbrook     check_watchpoint(addr & ~TARGET_PAGE_MASK, PAGE_WRITE);
24096658ffb8Spbrook     stw_phys(addr, val);
24106658ffb8Spbrook }
24116658ffb8Spbrook 
24126658ffb8Spbrook static void watch_mem_writel(void *opaque, target_phys_addr_t addr,
24136658ffb8Spbrook                              uint32_t val)
24146658ffb8Spbrook {
24150f459d16Spbrook     check_watchpoint(addr & ~TARGET_PAGE_MASK, PAGE_WRITE);
24166658ffb8Spbrook     stl_phys(addr, val);
24176658ffb8Spbrook }
24186658ffb8Spbrook 
/* b/w/l read dispatch for pages carrying watchpoints. */
static CPUReadMemoryFunc *watch_mem_read[3] = {
    watch_mem_readb,
    watch_mem_readw,
    watch_mem_readl,
};
24246658ffb8Spbrook 
/* b/w/l write dispatch for pages carrying watchpoints. */
static CPUWriteMemoryFunc *watch_mem_write[3] = {
    watch_mem_writeb,
    watch_mem_writew,
    watch_mem_writel,
};
24306658ffb8Spbrook 
/* Dispatch a read to the handler registered for the sub-page slot
   containing 'addr'.  len is 0/1/2 for byte/word/long accesses and
   selects the matching entry in the per-slot handler table. */
static inline uint32_t subpage_readlen (subpage_t *mmio, target_phys_addr_t addr,
                                 unsigned int len)
{
    uint32_t ret;
    unsigned int idx;

    /* Slot index of the access within this subpage. */
    idx = SUBPAGE_IDX(addr - mmio->base);
#if defined(DEBUG_SUBPAGE)
    printf("%s: subpage %p len %d addr " TARGET_FMT_plx " idx %d\n", __func__,
           mmio, len, addr, idx);
#endif
    /* opaque[...][0][...] holds the read-side opaque pointer. */
    ret = (**mmio->mem_read[idx][len])(mmio->opaque[idx][0][len], addr);

    return ret;
}
2446db7b5426Sblueswir1 
/* Dispatch a write to the handler registered for the sub-page slot
   containing 'addr'.  len is 0/1/2 for byte/word/long accesses. */
static inline void subpage_writelen (subpage_t *mmio, target_phys_addr_t addr,
                              uint32_t value, unsigned int len)
{
    unsigned int idx;

    /* Slot index of the access within this subpage. */
    idx = SUBPAGE_IDX(addr - mmio->base);
#if defined(DEBUG_SUBPAGE)
    printf("%s: subpage %p len %d addr " TARGET_FMT_plx " idx %d value %08x\n", __func__,
           mmio, len, addr, idx, value);
#endif
    /* opaque[...][1][...] holds the write-side opaque pointer. */
    (**mmio->mem_write[idx][len])(mmio->opaque[idx][1][len], addr, value);
}
2459db7b5426Sblueswir1 
2460db7b5426Sblueswir1 static uint32_t subpage_readb (void *opaque, target_phys_addr_t addr)
2461db7b5426Sblueswir1 {
2462db7b5426Sblueswir1 #if defined(DEBUG_SUBPAGE)
2463db7b5426Sblueswir1     printf("%s: addr " TARGET_FMT_plx "\n", __func__, addr);
2464db7b5426Sblueswir1 #endif
2465db7b5426Sblueswir1 
2466db7b5426Sblueswir1     return subpage_readlen(opaque, addr, 0);
2467db7b5426Sblueswir1 }
2468db7b5426Sblueswir1 
2469db7b5426Sblueswir1 static void subpage_writeb (void *opaque, target_phys_addr_t addr,
2470db7b5426Sblueswir1                             uint32_t value)
2471db7b5426Sblueswir1 {
2472db7b5426Sblueswir1 #if defined(DEBUG_SUBPAGE)
2473db7b5426Sblueswir1     printf("%s: addr " TARGET_FMT_plx " val %08x\n", __func__, addr, value);
2474db7b5426Sblueswir1 #endif
2475db7b5426Sblueswir1     subpage_writelen(opaque, addr, value, 0);
2476db7b5426Sblueswir1 }
2477db7b5426Sblueswir1 
2478db7b5426Sblueswir1 static uint32_t subpage_readw (void *opaque, target_phys_addr_t addr)
2479db7b5426Sblueswir1 {
2480db7b5426Sblueswir1 #if defined(DEBUG_SUBPAGE)
2481db7b5426Sblueswir1     printf("%s: addr " TARGET_FMT_plx "\n", __func__, addr);
2482db7b5426Sblueswir1 #endif
2483db7b5426Sblueswir1 
2484db7b5426Sblueswir1     return subpage_readlen(opaque, addr, 1);
2485db7b5426Sblueswir1 }
2486db7b5426Sblueswir1 
2487db7b5426Sblueswir1 static void subpage_writew (void *opaque, target_phys_addr_t addr,
2488db7b5426Sblueswir1                             uint32_t value)
2489db7b5426Sblueswir1 {
2490db7b5426Sblueswir1 #if defined(DEBUG_SUBPAGE)
2491db7b5426Sblueswir1     printf("%s: addr " TARGET_FMT_plx " val %08x\n", __func__, addr, value);
2492db7b5426Sblueswir1 #endif
2493db7b5426Sblueswir1     subpage_writelen(opaque, addr, value, 1);
2494db7b5426Sblueswir1 }
2495db7b5426Sblueswir1 
2496db7b5426Sblueswir1 static uint32_t subpage_readl (void *opaque, target_phys_addr_t addr)
2497db7b5426Sblueswir1 {
2498db7b5426Sblueswir1 #if defined(DEBUG_SUBPAGE)
2499db7b5426Sblueswir1     printf("%s: addr " TARGET_FMT_plx "\n", __func__, addr);
2500db7b5426Sblueswir1 #endif
2501db7b5426Sblueswir1 
2502db7b5426Sblueswir1     return subpage_readlen(opaque, addr, 2);
2503db7b5426Sblueswir1 }
2504db7b5426Sblueswir1 
2505db7b5426Sblueswir1 static void subpage_writel (void *opaque,
2506db7b5426Sblueswir1                          target_phys_addr_t addr, uint32_t value)
2507db7b5426Sblueswir1 {
2508db7b5426Sblueswir1 #if defined(DEBUG_SUBPAGE)
2509db7b5426Sblueswir1     printf("%s: addr " TARGET_FMT_plx " val %08x\n", __func__, addr, value);
2510db7b5426Sblueswir1 #endif
2511db7b5426Sblueswir1     subpage_writelen(opaque, addr, value, 2);
2512db7b5426Sblueswir1 }
2513db7b5426Sblueswir1 
/* b/w/l read dispatch table for subpage regions. */
static CPUReadMemoryFunc *subpage_read[] = {
    &subpage_readb,
    &subpage_readw,
    &subpage_readl,
};
2519db7b5426Sblueswir1 
/* b/w/l write dispatch table for subpage regions. */
static CPUWriteMemoryFunc *subpage_write[] = {
    &subpage_writeb,
    &subpage_writew,
    &subpage_writel,
};
2525db7b5426Sblueswir1 
/* Route the per-size handlers of io memory region 'memory' into the subpage
   dispatch tables of 'mmio' for the byte range [start, end] of the page.
   start/end are offsets inside a single target page; returns -1 if either
   offset is out of range, 0 on success. */
static int subpage_register (subpage_t *mmio, uint32_t start, uint32_t end,
                             ram_addr_t memory)
{
    int idx, eidx;
    unsigned int i;

    if (start >= TARGET_PAGE_SIZE || end >= TARGET_PAGE_SIZE)
        return -1;
    idx = SUBPAGE_IDX(start);
    eidx = SUBPAGE_IDX(end);
#if defined(DEBUG_SUBPAGE)
    printf("%s: %p start %08x end %08x idx %08x eidx %08x mem %d\n", __func__,
           mmio, start, end, idx, eidx, memory);
#endif
    /* strip the encoding to get the io_mem_* table index */
    memory >>= IO_MEM_SHIFT;
    for (; idx <= eidx; idx++) {
        for (i = 0; i < 4; i++) {
            /* Store the address of the io_mem table slot rather than the
               handler itself, so a later dynamic re-registration of the
               region (see the comment above cpu_register_io_memory) is
               picked up automatically.  opaque[][0][] is the read side,
               opaque[][1][] the write side. */
            if (io_mem_read[memory][i]) {
                mmio->mem_read[idx][i] = &io_mem_read[memory][i];
                mmio->opaque[idx][0][i] = io_mem_opaque[memory];
            }
            if (io_mem_write[memory][i]) {
                mmio->mem_write[idx][i] = &io_mem_write[memory][i];
                mmio->opaque[idx][1][i] = io_mem_opaque[memory];
            }
        }
    }

    return 0;
}
2556db7b5426Sblueswir1 
/* Allocate and register a subpage container covering the target page at
   'base'.  On success *phys receives the new encoded io index tagged with
   IO_MEM_SUBPAGE, and the whole page is initially routed to 'orig_memory'.
   Returns the new subpage_t, or NULL if allocation failed. */
static void *subpage_init (target_phys_addr_t base, ram_addr_t *phys,
                           ram_addr_t orig_memory)
{
    subpage_t *mmio;
    int subpage_memory;

    mmio = qemu_mallocz(sizeof(subpage_t));
    if (mmio != NULL) {
        mmio->base = base;
        /* NOTE(review): the -1 error return of cpu_register_io_memory is
           not checked here — presumably the io table never fills up in
           practice; verify before relying on this path. */
        subpage_memory = cpu_register_io_memory(0, subpage_read, subpage_write, mmio);
#if defined(DEBUG_SUBPAGE)
        printf("%s: %p base " TARGET_FMT_plx " len %08x %d\n", __func__,
               mmio, base, TARGET_PAGE_SIZE, subpage_memory);
#endif
        *phys = subpage_memory | IO_MEM_SUBPAGE;
        subpage_register(mmio, 0, TARGET_PAGE_SIZE - 1, orig_memory);
    }

    return mmio;
}
2577db7b5426Sblueswir1 
/* Register the built-in io memory regions (ROM, unassigned, notdirty and
   the watchpoint region) and allocate the physical-RAM dirty bitmap. */
static void io_mem_init(void)
{
    cpu_register_io_memory(IO_MEM_ROM >> IO_MEM_SHIFT, error_mem_read, unassigned_mem_write, NULL);
    cpu_register_io_memory(IO_MEM_UNASSIGNED >> IO_MEM_SHIFT, unassigned_mem_read, unassigned_mem_write, NULL);
    cpu_register_io_memory(IO_MEM_NOTDIRTY >> IO_MEM_SHIFT, error_mem_read, notdirty_mem_write, NULL);
    /* entries below 5 are reserved for the fixed regions; dynamic
       registrations (io_index == 0) start after them */
    io_mem_nb = 5;

    io_mem_watch = cpu_register_io_memory(0, watch_mem_read,
                                          watch_mem_write, NULL);
    /* alloc dirty bits array: one byte of dirty flags per target page,
       initially marked all-dirty */
    phys_ram_dirty = qemu_vmalloc(phys_ram_size >> TARGET_PAGE_BITS);
    memset(phys_ram_dirty, 0xff, phys_ram_size >> TARGET_PAGE_BITS);
}
259133417e70Sbellard 
/* mem_read and mem_write are arrays of functions containing the
   function to access byte (index 0), word (index 1) and dword (index
   2). Functions can be omitted with a NULL function pointer. The
   registered functions may be modified dynamically later.
   If io_index is non zero, the corresponding io zone is
   modified. If it is zero, a new io zone is allocated. The return
   value can be used with cpu_register_physical_memory(). (-1) is
   returned if error. */
int cpu_register_io_memory(int io_index,
                           CPUReadMemoryFunc **mem_read,
                           CPUWriteMemoryFunc **mem_write,
                           void *opaque)
{
    int i, subwidth = 0;

    if (io_index <= 0) {
        /* allocate a fresh table slot */
        if (io_mem_nb >= IO_MEM_NB_ENTRIES)
            return -1;
        io_index = io_mem_nb++;
    } else {
        /* caller supplied a fixed slot; just bounds-check it */
        if (io_index >= IO_MEM_NB_ENTRIES)
            return -1;
    }

    for(i = 0;i < 3; i++) {
        /* if any access size lacks a read or write handler, tag the
           returned index so callers know the region is partial-width */
        if (!mem_read[i] || !mem_write[i])
            subwidth = IO_MEM_SUBWIDTH;
        io_mem_read[io_index][i] = mem_read[i];
        io_mem_write[io_index][i] = mem_write[i];
    }
    io_mem_opaque[io_index] = opaque;
    return (io_index << IO_MEM_SHIFT) | subwidth;
}
262561382a50Sbellard 
26268926b517Sbellard CPUWriteMemoryFunc **cpu_get_io_memory_write(int io_index)
26278926b517Sbellard {
26288926b517Sbellard     return io_mem_write[io_index >> IO_MEM_SHIFT];
26298926b517Sbellard }
26308926b517Sbellard 
26318926b517Sbellard CPUReadMemoryFunc **cpu_get_io_memory_read(int io_index)
26328926b517Sbellard {
26338926b517Sbellard     return io_mem_read[io_index >> IO_MEM_SHIFT];
26348926b517Sbellard }
26358926b517Sbellard 
2636e2eef170Spbrook #endif /* !defined(CONFIG_USER_ONLY) */
2637e2eef170Spbrook 
263813eb76e0Sbellard /* physical memory access (slow version, mainly for debug) */
263913eb76e0Sbellard #if defined(CONFIG_USER_ONLY)
/* Copy 'len' bytes between guest memory at 'addr' and 'buf' (direction
   selected by 'is_write') using the user-mode page table.  Stops silently
   at the first page that is unmapped or lacks the required protection. */
void cpu_physical_memory_rw(target_phys_addr_t addr, uint8_t *buf,
                            int len, int is_write)
{
    int l, flags;
    target_ulong page;
    void * p;

    while (len > 0) {
        page = addr & TARGET_PAGE_MASK;
        /* number of bytes left in the current target page */
        l = (page + TARGET_PAGE_SIZE) - addr;
        if (l > len)
            l = len;
        flags = page_get_flags(page);
        if (!(flags & PAGE_VALID))
            return;
        if (is_write) {
            if (!(flags & PAGE_WRITE))
                return;
            /* XXX: this code should not depend on lock_user */
            if (!(p = lock_user(VERIFY_WRITE, addr, l, 0)))
                /* FIXME - should this return an error rather than just fail? */
                return;
            memcpy(p, buf, l);
            unlock_user(p, addr, l);
        } else {
            if (!(flags & PAGE_READ))
                return;
            /* XXX: this code should not depend on lock_user */
            if (!(p = lock_user(VERIFY_READ, addr, l, 1)))
                /* FIXME - should this return an error rather than just fail? */
                return;
            memcpy(buf, p, l);
            unlock_user(p, addr, 0);
        }
        len -= l;
        buf += l;
        addr += l;
    }
}
26798df1cd07Sbellard 
268013eb76e0Sbellard #else
/* Copy 'len' bytes between guest physical memory at 'addr' and 'buf'
   (direction selected by 'is_write').  I/O pages are accessed through the
   registered handlers using the widest aligned access size available;
   RAM pages are copied directly and, on writes, have their dirty flags
   updated and any overlapping translated code invalidated. */
void cpu_physical_memory_rw(target_phys_addr_t addr, uint8_t *buf,
                            int len, int is_write)
{
    int l, io_index;
    uint8_t *ptr;
    uint32_t val;
    target_phys_addr_t page;
    unsigned long pd;
    PhysPageDesc *p;

    while (len > 0) {
        page = addr & TARGET_PAGE_MASK;
        /* number of bytes left in the current target page */
        l = (page + TARGET_PAGE_SIZE) - addr;
        if (l > len)
            l = len;
        p = phys_page_find(page >> TARGET_PAGE_BITS);
        if (!p) {
            pd = IO_MEM_UNASSIGNED;
        } else {
            pd = p->phys_offset;
        }

        if (is_write) {
            if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
                io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
                /* XXX: could force cpu_single_env to NULL to avoid
                   potential bugs */
                if (l >= 4 && ((addr & 3) == 0)) {
                    /* 32 bit write access */
                    val = ldl_p(buf);
                    io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
                    l = 4;
                } else if (l >= 2 && ((addr & 1) == 0)) {
                    /* 16 bit write access */
                    val = lduw_p(buf);
                    io_mem_write[io_index][1](io_mem_opaque[io_index], addr, val);
                    l = 2;
                } else {
                    /* 8 bit write access */
                    val = ldub_p(buf);
                    io_mem_write[io_index][0](io_mem_opaque[io_index], addr, val);
                    l = 1;
                }
            } else {
                unsigned long addr1;
                addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
                /* RAM case */
                ptr = phys_ram_base + addr1;
                memcpy(ptr, buf, l);
                if (!cpu_physical_memory_is_dirty(addr1)) {
                    /* invalidate code */
                    tb_invalidate_phys_page_range(addr1, addr1 + l, 0);
                    /* set dirty bit (all flags except CODE_DIRTY, which
                       tb_invalidate_phys_page_range manages) */
                    phys_ram_dirty[addr1 >> TARGET_PAGE_BITS] |=
                        (0xff & ~CODE_DIRTY_FLAG);
                }
            }
        } else {
            /* ROMD regions read like RAM even though they are above
               IO_MEM_ROM, hence the extra flag test */
            if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
                !(pd & IO_MEM_ROMD)) {
                /* I/O case */
                io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
                if (l >= 4 && ((addr & 3) == 0)) {
                    /* 32 bit read access */
                    val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr);
                    stl_p(buf, val);
                    l = 4;
                } else if (l >= 2 && ((addr & 1) == 0)) {
                    /* 16 bit read access */
                    val = io_mem_read[io_index][1](io_mem_opaque[io_index], addr);
                    stw_p(buf, val);
                    l = 2;
                } else {
                    /* 8 bit read access */
                    val = io_mem_read[io_index][0](io_mem_opaque[io_index], addr);
                    stb_p(buf, val);
                    l = 1;
                }
            } else {
                /* RAM case */
                ptr = phys_ram_base + (pd & TARGET_PAGE_MASK) +
                    (addr & ~TARGET_PAGE_MASK);
                memcpy(buf, ptr, l);
            }
        }
        len -= l;
        buf += l;
        addr += l;
    }
}
27718df1cd07Sbellard 
/* used for ROM loading : can write in RAM and ROM */
void cpu_physical_memory_write_rom(target_phys_addr_t addr,
                                   const uint8_t *buf, int len)
{
    int l;
    uint8_t *ptr;
    target_phys_addr_t page;
    unsigned long pd;
    PhysPageDesc *p;

    while (len > 0) {
        page = addr & TARGET_PAGE_MASK;
        /* number of bytes left in the current target page */
        l = (page + TARGET_PAGE_SIZE) - addr;
        if (l > len)
            l = len;
        p = phys_page_find(page >> TARGET_PAGE_BITS);
        if (!p) {
            pd = IO_MEM_UNASSIGNED;
        } else {
            pd = p->phys_offset;
        }

        /* only RAM-, ROM- and ROMD-backed pages are written; anything
           else (I/O, unassigned) is silently skipped */
        if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM &&
            (pd & ~TARGET_PAGE_MASK) != IO_MEM_ROM &&
            !(pd & IO_MEM_ROMD)) {
            /* do nothing */
        } else {
            unsigned long addr1;
            addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
            /* ROM/RAM case */
            ptr = phys_ram_base + addr1;
            memcpy(ptr, buf, l);
        }
        len -= l;
        buf += l;
        addr += l;
    }
}
2810d0ecd2aaSbellard 
2811d0ecd2aaSbellard 
28128df1cd07Sbellard /* warning: addr must be aligned */
28138df1cd07Sbellard uint32_t ldl_phys(target_phys_addr_t addr)
28148df1cd07Sbellard {
28158df1cd07Sbellard     int io_index;
28168df1cd07Sbellard     uint8_t *ptr;
28178df1cd07Sbellard     uint32_t val;
28188df1cd07Sbellard     unsigned long pd;
28198df1cd07Sbellard     PhysPageDesc *p;
28208df1cd07Sbellard 
28218df1cd07Sbellard     p = phys_page_find(addr >> TARGET_PAGE_BITS);
28228df1cd07Sbellard     if (!p) {
28238df1cd07Sbellard         pd = IO_MEM_UNASSIGNED;
28248df1cd07Sbellard     } else {
28258df1cd07Sbellard         pd = p->phys_offset;
28268df1cd07Sbellard     }
28278df1cd07Sbellard 
28282a4188a3Sbellard     if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
28292a4188a3Sbellard         !(pd & IO_MEM_ROMD)) {
28308df1cd07Sbellard         /* I/O case */
28318df1cd07Sbellard         io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
28328df1cd07Sbellard         val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr);
28338df1cd07Sbellard     } else {
28348df1cd07Sbellard         /* RAM case */
28358df1cd07Sbellard         ptr = phys_ram_base + (pd & TARGET_PAGE_MASK) +
28368df1cd07Sbellard             (addr & ~TARGET_PAGE_MASK);
28378df1cd07Sbellard         val = ldl_p(ptr);
28388df1cd07Sbellard     }
28398df1cd07Sbellard     return val;
28408df1cd07Sbellard }
28418df1cd07Sbellard 
/* Load a 64-bit value from guest physical memory.  I/O regions are read
   as two 32-bit accesses, high word first on big-endian targets.
   warning: addr must be aligned */
uint64_t ldq_phys(target_phys_addr_t addr)
{
    int io_index;
    uint8_t *ptr;
    uint64_t val;
    unsigned long pd;
    PhysPageDesc *p;

    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    if (!p) {
        pd = IO_MEM_UNASSIGNED;
    } else {
        pd = p->phys_offset;
    }

    if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
        !(pd & IO_MEM_ROMD)) {
        /* I/O case */
        io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
#ifdef TARGET_WORDS_BIGENDIAN
        val = (uint64_t)io_mem_read[io_index][2](io_mem_opaque[io_index], addr) << 32;
        val |= io_mem_read[io_index][2](io_mem_opaque[io_index], addr + 4);
#else
        val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr);
        val |= (uint64_t)io_mem_read[io_index][2](io_mem_opaque[io_index], addr + 4) << 32;
#endif
    } else {
        /* RAM case */
        ptr = phys_ram_base + (pd & TARGET_PAGE_MASK) +
            (addr & ~TARGET_PAGE_MASK);
        val = ldq_p(ptr);
    }
    return val;
}
287784b7b8e7Sbellard 
2878aab33094Sbellard /* XXX: optimize */
2879aab33094Sbellard uint32_t ldub_phys(target_phys_addr_t addr)
2880aab33094Sbellard {
2881aab33094Sbellard     uint8_t val;
2882aab33094Sbellard     cpu_physical_memory_read(addr, &val, 1);
2883aab33094Sbellard     return val;
2884aab33094Sbellard }
2885aab33094Sbellard 
2886aab33094Sbellard /* XXX: optimize */
2887aab33094Sbellard uint32_t lduw_phys(target_phys_addr_t addr)
2888aab33094Sbellard {
2889aab33094Sbellard     uint16_t val;
2890aab33094Sbellard     cpu_physical_memory_read(addr, (uint8_t *)&val, 2);
2891aab33094Sbellard     return tswap16(val);
2892aab33094Sbellard }
2893aab33094Sbellard 
/* warning: addr must be aligned. The ram page is not masked as dirty
   and the code inside is not invalidated. It is useful if the dirty
   bits are used to track modified PTEs */
void stl_phys_notdirty(target_phys_addr_t addr, uint32_t val)
{
    int io_index;
    uint8_t *ptr;
    unsigned long pd;
    PhysPageDesc *p;

    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    if (!p) {
        pd = IO_MEM_UNASSIGNED;
    } else {
        pd = p->phys_offset;
    }

    if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
        /* anything that is not plain RAM goes through the io handlers */
        io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
        io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
    } else {
        /* RAM: store directly, deliberately skipping dirty tracking */
        ptr = phys_ram_base + (pd & TARGET_PAGE_MASK) +
            (addr & ~TARGET_PAGE_MASK);
        stl_p(ptr, val);
    }
}
29208df1cd07Sbellard 
/* 64-bit variant of stl_phys_notdirty: store without marking the page
   dirty or invalidating translated code.  I/O regions are written as two
   32-bit accesses, high word first on big-endian targets.
   warning: addr must be aligned (see stl_phys_notdirty). */
void stq_phys_notdirty(target_phys_addr_t addr, uint64_t val)
{
    int io_index;
    uint8_t *ptr;
    unsigned long pd;
    PhysPageDesc *p;

    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    if (!p) {
        pd = IO_MEM_UNASSIGNED;
    } else {
        pd = p->phys_offset;
    }

    if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
        io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
#ifdef TARGET_WORDS_BIGENDIAN
        io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val >> 32);
        io_mem_write[io_index][2](io_mem_opaque[io_index], addr + 4, val);
#else
        io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
        io_mem_write[io_index][2](io_mem_opaque[io_index], addr + 4, val >> 32);
#endif
    } else {
        ptr = phys_ram_base + (pd & TARGET_PAGE_MASK) +
            (addr & ~TARGET_PAGE_MASK);
        stq_p(ptr, val);
    }
}
2950bc98a7efSj_mayer 
/* Store a 32-bit value to guest physical memory, updating dirty flags
   and invalidating any translated code in the page.
   warning: addr must be aligned */
void stl_phys(target_phys_addr_t addr, uint32_t val)
{
    int io_index;
    uint8_t *ptr;
    unsigned long pd;
    PhysPageDesc *p;

    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    if (!p) {
        pd = IO_MEM_UNASSIGNED;
    } else {
        pd = p->phys_offset;
    }

    if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
        /* anything that is not plain RAM goes through the io handlers */
        io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
        io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
    } else {
        unsigned long addr1;
        addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
        /* RAM case */
        ptr = phys_ram_base + addr1;
        stl_p(ptr, val);
        if (!cpu_physical_memory_is_dirty(addr1)) {
            /* invalidate code */
            tb_invalidate_phys_page_range(addr1, addr1 + 4, 0);
            /* set dirty bit */
            phys_ram_dirty[addr1 >> TARGET_PAGE_BITS] |=
                (0xff & ~CODE_DIRTY_FLAG);
        }
    }
}
29848df1cd07Sbellard 
2985aab33094Sbellard /* XXX: optimize */
2986aab33094Sbellard void stb_phys(target_phys_addr_t addr, uint32_t val)
2987aab33094Sbellard {
2988aab33094Sbellard     uint8_t v = val;
2989aab33094Sbellard     cpu_physical_memory_write(addr, &v, 1);
2990aab33094Sbellard }
2991aab33094Sbellard 
2992aab33094Sbellard /* XXX: optimize */
2993aab33094Sbellard void stw_phys(target_phys_addr_t addr, uint32_t val)
2994aab33094Sbellard {
2995aab33094Sbellard     uint16_t v = tswap16(val);
2996aab33094Sbellard     cpu_physical_memory_write(addr, (const uint8_t *)&v, 2);
2997aab33094Sbellard }
2998aab33094Sbellard 
2999aab33094Sbellard /* XXX: optimize */
3000aab33094Sbellard void stq_phys(target_phys_addr_t addr, uint64_t val)
3001aab33094Sbellard {
3002aab33094Sbellard     val = tswap64(val);
3003aab33094Sbellard     cpu_physical_memory_write(addr, (const uint8_t *)&val, 8);
3004aab33094Sbellard }
3005aab33094Sbellard 
300613eb76e0Sbellard #endif
300713eb76e0Sbellard 
/* virtual memory access for debug: translate each page of the virtual
   range with cpu_get_phys_page_debug and forward to
   cpu_physical_memory_rw.  Returns 0 on success, -1 if a page is
   unmapped. */
int cpu_memory_rw_debug(CPUState *env, target_ulong addr,
                        uint8_t *buf, int len, int is_write)
{
    int l;
    target_phys_addr_t phys_addr;
    target_ulong page;

    while (len > 0) {
        page = addr & TARGET_PAGE_MASK;
        phys_addr = cpu_get_phys_page_debug(env, page);
        /* if no physical page mapped, return an error */
        if (phys_addr == -1)
            return -1;
        /* number of bytes left in the current target page */
        l = (page + TARGET_PAGE_SIZE) - addr;
        if (l > len)
            l = len;
        cpu_physical_memory_rw(phys_addr + (addr & ~TARGET_PAGE_MASK),
                               buf, l, is_write);
        len -= l;
        buf += l;
        addr += l;
    }
    return 0;
}
303313eb76e0Sbellard 
/* in deterministic execution mode, instructions doing device I/Os
   must be at the end of the TB.  'retaddr' is the host return address
   inside the offending translation block; the TB is regenerated with
   CF_LAST_IO so the I/O instruction becomes its last one, then
   execution resumes from the guest state. */
void cpu_io_recompile(CPUState *env, void *retaddr)
{
    TranslationBlock *tb;
    uint32_t n, cflags;
    target_ulong pc, cs_base;
    uint64_t flags;

    tb = tb_find_pc((unsigned long)retaddr);
    if (!tb) {
        cpu_abort(env, "cpu_io_recompile: could not find TB for pc=%p",
                  retaddr);
    }
    n = env->icount_decr.u16.low + tb->icount;
    cpu_restore_state(tb, env, (unsigned long)retaddr, NULL);
    /* Calculate how many instructions had been executed before the fault
       occurred.  */
    n = n - env->icount_decr.u16.low;
    /* Generate a new TB ending on the I/O insn.  */
    n++;
    /* On MIPS and SH, delay slot instructions can only be restarted if
       they were already the first instruction in the TB.  If this is not
       the first instruction in a TB then re-execute the preceding
       branch.  */
#if defined(TARGET_MIPS)
    if ((env->hflags & MIPS_HFLAG_BMASK) != 0 && n > 1) {
        env->active_tc.PC -= 4;
        env->icount_decr.u16.low++;
        env->hflags &= ~MIPS_HFLAG_BMASK;
    }
#elif defined(TARGET_SH4)
    if ((env->flags & ((DELAY_SLOT | DELAY_SLOT_CONDITIONAL))) != 0
            && n > 1) {
        env->pc -= 2;
        env->icount_decr.u16.low++;
        env->flags &= ~(DELAY_SLOT | DELAY_SLOT_CONDITIONAL);
    }
#endif
    /* This should never happen.  */
    if (n > CF_COUNT_MASK)
        cpu_abort(env, "TB too big during recompile");

    cflags = n | CF_LAST_IO;
    pc = tb->pc;
    cs_base = tb->cs_base;
    flags = tb->flags;
    /* discard the old TB and retranslate with the new instruction count */
    tb_phys_invalidate(tb, -1);
    /* FIXME: In theory this could raise an exception.  In practice
       we have already translated the block once so it's probably ok.  */
    tb_gen_code(env, pc, cs_base, flags, cflags);
    /* TODO: If env->pc != tb->pc (i.e. the faulting instruction was not
       the first in the TB) then we end up generating a whole new TB and
       repeating the fault, which is horribly inefficient.
       Better would be to execute just this insn uncached, or generate a
       second new TB.  */
    cpu_resume_from_signal(env, NULL);
}
30922e70f6efSpbrook 
/* Print translation-buffer statistics (TB counts, code sizes, cross-page
   and direct-jump ratios, flush counters) to 'f' using 'cpu_fprintf'. */
void dump_exec_info(FILE *f,
                    int (*cpu_fprintf)(FILE *f, const char *fmt, ...))
{
    int i, target_code_size, max_target_code_size;
    int direct_jmp_count, direct_jmp2_count, cross_page;
    TranslationBlock *tb;

    /* single pass over all TBs to accumulate the statistics */
    target_code_size = 0;
    max_target_code_size = 0;
    cross_page = 0;
    direct_jmp_count = 0;
    direct_jmp2_count = 0;
    for(i = 0; i < nb_tbs; i++) {
        tb = &tbs[i];
        target_code_size += tb->size;
        if (tb->size > max_target_code_size)
            max_target_code_size = tb->size;
        if (tb->page_addr[1] != -1)
            cross_page++;
        if (tb->tb_next_offset[0] != 0xffff) {
            direct_jmp_count++;
            if (tb->tb_next_offset[1] != 0xffff) {
                direct_jmp2_count++;
            }
        }
    }
    /* XXX: avoid using doubles ? */
    cpu_fprintf(f, "Translation buffer state:\n");
    cpu_fprintf(f, "gen code size       %ld/%ld\n",
                code_gen_ptr - code_gen_buffer, code_gen_buffer_max_size);
    cpu_fprintf(f, "TB count            %d/%d\n",
                nb_tbs, code_gen_max_blocks);
    cpu_fprintf(f, "TB avg target size  %d max=%d bytes\n",
                nb_tbs ? target_code_size / nb_tbs : 0,
                max_target_code_size);
    cpu_fprintf(f, "TB avg host size    %d bytes (expansion ratio: %0.1f)\n",
                nb_tbs ? (code_gen_ptr - code_gen_buffer) / nb_tbs : 0,
                target_code_size ? (double) (code_gen_ptr - code_gen_buffer) / target_code_size : 0);
    cpu_fprintf(f, "cross page TB count %d (%d%%)\n",
            cross_page,
            nb_tbs ? (cross_page * 100) / nb_tbs : 0);
    cpu_fprintf(f, "direct jump count   %d (%d%%) (2 jumps=%d %d%%)\n",
                direct_jmp_count,
                nb_tbs ? (direct_jmp_count * 100) / nb_tbs : 0,
                direct_jmp2_count,
                nb_tbs ? (direct_jmp2_count * 100) / nb_tbs : 0);
    cpu_fprintf(f, "\nStatistics:\n");
    cpu_fprintf(f, "TB flush count      %d\n", tb_flush_count);
    cpu_fprintf(f, "TB invalidate count %d\n", tb_phys_invalidate_count);
    cpu_fprintf(f, "TLB flush count     %d\n", tlb_flush_count);
    tcg_dump_info(f, cpu_fprintf);
}
3145e3db7226Sbellard 
314661382a50Sbellard #if !defined(CONFIG_USER_ONLY)
314761382a50Sbellard 
314861382a50Sbellard #define MMUSUFFIX _cmmu
314961382a50Sbellard #define GETPC() NULL
315061382a50Sbellard #define env cpu_single_env
3151b769d8feSbellard #define SOFTMMU_CODE_ACCESS
315261382a50Sbellard 
315361382a50Sbellard #define SHIFT 0
315461382a50Sbellard #include "softmmu_template.h"
315561382a50Sbellard 
315661382a50Sbellard #define SHIFT 1
315761382a50Sbellard #include "softmmu_template.h"
315861382a50Sbellard 
315961382a50Sbellard #define SHIFT 2
316061382a50Sbellard #include "softmmu_template.h"
316161382a50Sbellard 
316261382a50Sbellard #define SHIFT 3
316361382a50Sbellard #include "softmmu_template.h"
316461382a50Sbellard 
316561382a50Sbellard #undef env
316661382a50Sbellard 
316761382a50Sbellard #endif
3168