xref: /qemu/system/physmem.c (revision e9a1ab19d196aa50619fd8b77157bd11a5a8aa01)
154936004Sbellard /*
2fd6ce8f6Sbellard  *  virtual page mapping and translated block handling
354936004Sbellard  *
454936004Sbellard  *  Copyright (c) 2003 Fabrice Bellard
554936004Sbellard  *
654936004Sbellard  * This library is free software; you can redistribute it and/or
754936004Sbellard  * modify it under the terms of the GNU Lesser General Public
854936004Sbellard  * License as published by the Free Software Foundation; either
954936004Sbellard  * version 2 of the License, or (at your option) any later version.
1054936004Sbellard  *
1154936004Sbellard  * This library is distributed in the hope that it will be useful,
1254936004Sbellard  * but WITHOUT ANY WARRANTY; without even the implied warranty of
1354936004Sbellard  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
1454936004Sbellard  * Lesser General Public License for more details.
1554936004Sbellard  *
1654936004Sbellard  * You should have received a copy of the GNU Lesser General Public
1754936004Sbellard  * License along with this library; if not, write to the Free Software
1854936004Sbellard  * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
1954936004Sbellard  */
2067b915a5Sbellard #include "config.h"
21d5a8f07cSbellard #ifdef _WIN32
22d5a8f07cSbellard #include <windows.h>
23d5a8f07cSbellard #else
24a98d49b1Sbellard #include <sys/types.h>
25d5a8f07cSbellard #include <sys/mman.h>
26d5a8f07cSbellard #endif
2754936004Sbellard #include <stdlib.h>
2854936004Sbellard #include <stdio.h>
2954936004Sbellard #include <stdarg.h>
3054936004Sbellard #include <string.h>
3154936004Sbellard #include <errno.h>
3254936004Sbellard #include <unistd.h>
3354936004Sbellard #include <inttypes.h>
3454936004Sbellard 
356180a181Sbellard #include "cpu.h"
366180a181Sbellard #include "exec-all.h"
3753a5960aSpbrook #if defined(CONFIG_USER_ONLY)
3853a5960aSpbrook #include <qemu.h>
3953a5960aSpbrook #endif
4054936004Sbellard 
41fd6ce8f6Sbellard //#define DEBUG_TB_INVALIDATE
4266e85a21Sbellard //#define DEBUG_FLUSH
439fa3e853Sbellard //#define DEBUG_TLB
4467d3b957Spbrook //#define DEBUG_UNASSIGNED
45fd6ce8f6Sbellard 
46fd6ce8f6Sbellard /* make various TB consistency checks */
47fd6ce8f6Sbellard //#define DEBUG_TB_CHECK
4898857888Sbellard //#define DEBUG_TLB_CHECK
49fd6ce8f6Sbellard 
5099773bd4Spbrook #if !defined(CONFIG_USER_ONLY)
5199773bd4Spbrook /* TB consistency checks only implemented for usermode emulation.  */
5299773bd4Spbrook #undef DEBUG_TB_CHECK
5399773bd4Spbrook #endif
5499773bd4Spbrook 
55fd6ce8f6Sbellard /* threshold to flush the translated code buffer */
56fd6ce8f6Sbellard #define CODE_GEN_BUFFER_MAX_SIZE (CODE_GEN_BUFFER_SIZE - CODE_GEN_MAX_SIZE)
57fd6ce8f6Sbellard 
589fa3e853Sbellard #define SMC_BITMAP_USE_THRESHOLD 10
599fa3e853Sbellard 
609fa3e853Sbellard #define MMAP_AREA_START        0x00000000
619fa3e853Sbellard #define MMAP_AREA_END          0xa8000000
62fd6ce8f6Sbellard 
63108c49b8Sbellard #if defined(TARGET_SPARC64)
64108c49b8Sbellard #define TARGET_PHYS_ADDR_SPACE_BITS 41
65108c49b8Sbellard #elif defined(TARGET_PPC64)
66108c49b8Sbellard #define TARGET_PHYS_ADDR_SPACE_BITS 42
67108c49b8Sbellard #else
68108c49b8Sbellard /* Note: for compatibility with kqemu, we use 32 bits for x86_64 */
69108c49b8Sbellard #define TARGET_PHYS_ADDR_SPACE_BITS 32
70108c49b8Sbellard #endif
71108c49b8Sbellard 
/* statically allocated pool of translation blocks */
TranslationBlock tbs[CODE_GEN_MAX_BLOCKS];
/* hash table indexed by physical PC, chained via phys_hash_next */
TranslationBlock *tb_phys_hash[CODE_GEN_PHYS_HASH_SIZE];
/* number of entries of tbs[] currently in use */
int nb_tbs;
/* any access to the tbs or the page table must use this lock */
spinlock_t tb_lock = SPIN_LOCK_UNLOCKED;

/* buffer receiving the generated host code */
uint8_t code_gen_buffer[CODE_GEN_BUFFER_SIZE] __attribute__((aligned (32)));
/* current allocation position inside code_gen_buffer */
uint8_t *code_gen_ptr;

/* guest RAM: size, backing fd and host mapping base */
int phys_ram_size;
int phys_ram_fd;
uint8_t *phys_ram_base;
/* per-page dirty flags for guest RAM */
uint8_t *phys_ram_dirty;
static ram_addr_t phys_ram_alloc_offset = 0;

/* head of the linked list of all registered CPUs */
CPUState *first_cpu;
/* current CPU in the current thread. It is only valid inside
   cpu_exec() */
CPUState *cpu_single_env;

/* per-target-page bookkeeping used by the translator */
typedef struct PageDesc {
    /* list of TBs intersecting this ram page */
    TranslationBlock *first_tb;
    /* in order to optimize self modifying code, we count the number
       of lookups we do to a given page to use a bitmap */
    unsigned int code_write_count;
    uint8_t *code_bitmap;
#if defined(CONFIG_USER_ONLY)
    unsigned long flags;
#endif
} PageDesc;

typedef struct PhysPageDesc {
    /* offset in host memory of the page + io_index in the low 12 bits */
    uint32_t phys_offset;
} PhysPageDesc;

/* page table geometry: L1_BITS + L2_BITS + TARGET_PAGE_BITS cover a
   32 bit address */
#define L2_BITS 10
#define L1_BITS (32 - L2_BITS - TARGET_PAGE_BITS)

#define L1_SIZE (1 << L1_BITS)
#define L2_SIZE (1 << L2_BITS)

static void io_mem_init(void);

/* host page size information, initialized by page_init() */
unsigned long qemu_real_host_page_size;
unsigned long qemu_host_page_bits;
unsigned long qemu_host_page_size;
unsigned long qemu_host_page_mask;

/* XXX: for system emulation, it could just be an array */
static PageDesc *l1_map[L1_SIZE];
PhysPageDesc **l1_phys_map;

/* io memory support */
CPUWriteMemoryFunc *io_mem_write[IO_MEM_NB_ENTRIES][4];
CPUReadMemoryFunc *io_mem_read[IO_MEM_NB_ENTRIES][4];
void *io_mem_opaque[IO_MEM_NB_ENTRIES];
static int io_mem_nb;

/* log support */
char *logfilename = "/tmp/qemu.log";
FILE *logfile;
int loglevel;

/* statistics */
static int tlb_flush_count;
static int tb_flush_count;
static int tb_phys_invalidate_count;
141e3db7226Sbellard 
/* One-time initialization of the host page size globals, and making
   the static code generation buffer executable. */
static void page_init(void)
{
    /* NOTE: we can always suppose that qemu_host_page_size >=
       TARGET_PAGE_SIZE */
#ifdef _WIN32
    {
        SYSTEM_INFO system_info;
        DWORD old_protect;

        GetSystemInfo(&system_info);
        qemu_real_host_page_size = system_info.dwPageSize;

        /* allow execution of the generated host code */
        VirtualProtect(code_gen_buffer, sizeof(code_gen_buffer),
                       PAGE_EXECUTE_READWRITE, &old_protect);
    }
#else
    qemu_real_host_page_size = getpagesize();
    {
        unsigned long start, end;

        /* round the buffer boundaries out to host page limits, as
           required by mprotect() */
        start = (unsigned long)code_gen_buffer;
        start &= ~(qemu_real_host_page_size - 1);

        end = (unsigned long)code_gen_buffer + sizeof(code_gen_buffer);
        end += qemu_real_host_page_size - 1;
        end &= ~(qemu_real_host_page_size - 1);

        mprotect((void *)start, end - start,
                 PROT_READ | PROT_WRITE | PROT_EXEC);
    }
#endif

    /* default to the real host page size if not already set, clamped
       to at least TARGET_PAGE_SIZE */
    if (qemu_host_page_size == 0)
        qemu_host_page_size = qemu_real_host_page_size;
    if (qemu_host_page_size < TARGET_PAGE_SIZE)
        qemu_host_page_size = TARGET_PAGE_SIZE;
    qemu_host_page_bits = 0;
    while ((1 << qemu_host_page_bits) < qemu_host_page_size)
        qemu_host_page_bits++;
    qemu_host_page_mask = ~(qemu_host_page_size - 1);
    /* allocate and clear the first level of the physical page table */
    l1_phys_map = qemu_vmalloc(L1_SIZE * sizeof(void *));
    memset(l1_phys_map, 0, L1_SIZE * sizeof(void *));
}
18554936004Sbellard 
186fd6ce8f6Sbellard static inline PageDesc *page_find_alloc(unsigned int index)
18754936004Sbellard {
18854936004Sbellard     PageDesc **lp, *p;
18954936004Sbellard 
19054936004Sbellard     lp = &l1_map[index >> L2_BITS];
19154936004Sbellard     p = *lp;
19254936004Sbellard     if (!p) {
19354936004Sbellard         /* allocate if not found */
19459817ccbSbellard         p = qemu_malloc(sizeof(PageDesc) * L2_SIZE);
195fd6ce8f6Sbellard         memset(p, 0, sizeof(PageDesc) * L2_SIZE);
19654936004Sbellard         *lp = p;
19754936004Sbellard     }
19854936004Sbellard     return p + (index & (L2_SIZE - 1));
19954936004Sbellard }
20054936004Sbellard 
201fd6ce8f6Sbellard static inline PageDesc *page_find(unsigned int index)
20254936004Sbellard {
20354936004Sbellard     PageDesc *p;
20454936004Sbellard 
20554936004Sbellard     p = l1_map[index >> L2_BITS];
20654936004Sbellard     if (!p)
20754936004Sbellard         return 0;
208fd6ce8f6Sbellard     return p + (index & (L2_SIZE - 1));
20954936004Sbellard }
21054936004Sbellard 
/* Look up (and optionally create) the PhysPageDesc for physical page
   'index'.  The table has one or two levels depending on
   TARGET_PHYS_ADDR_SPACE_BITS.  Returns NULL when 'alloc' is 0 and the
   entry does not exist yet. */
static PhysPageDesc *phys_page_find_alloc(target_phys_addr_t index, int alloc)
{
    void **lp, **p;
    PhysPageDesc *pd;

    p = (void **)l1_phys_map;
#if TARGET_PHYS_ADDR_SPACE_BITS > 32

#if TARGET_PHYS_ADDR_SPACE_BITS > (32 + L1_BITS)
#error unsupported TARGET_PHYS_ADDR_SPACE_BITS
#endif
    /* extra top level for physical addresses wider than 32 bits */
    lp = p + ((index >> (L1_BITS + L2_BITS)) & (L1_SIZE - 1));
    p = *lp;
    if (!p) {
        /* allocate if not found */
        if (!alloc)
            return NULL;
        p = qemu_vmalloc(sizeof(void *) * L1_SIZE);
        memset(p, 0, sizeof(void *) * L1_SIZE);
        *lp = p;
    }
#endif
    lp = p + ((index >> L2_BITS) & (L1_SIZE - 1));
    pd = *lp;
    if (!pd) {
        int i;
        /* allocate if not found */
        if (!alloc)
            return NULL;
        pd = qemu_vmalloc(sizeof(PhysPageDesc) * L2_SIZE);
        *lp = pd;
        /* every page of the new table starts out as unassigned I/O */
        for (i = 0; i < L2_SIZE; i++)
          pd[i].phys_offset = IO_MEM_UNASSIGNED;
    }
    return ((PhysPageDesc *)pd) + (index & (L2_SIZE - 1));
}
24792e873b9Sbellard 
/* Non-allocating lookup: returns NULL when physical page 'index' has
   no entry yet. */
static inline PhysPageDesc *phys_page_find(target_phys_addr_t index)
{
    return phys_page_find_alloc(index, 0);
}
25292e873b9Sbellard 
2539fa3e853Sbellard #if !defined(CONFIG_USER_ONLY)
2546a00d601Sbellard static void tlb_protect_code(ram_addr_t ram_addr);
2553a7d929eSbellard static void tlb_unprotect_code_phys(CPUState *env, ram_addr_t ram_addr,
2563a7d929eSbellard                                     target_ulong vaddr);
2579fa3e853Sbellard #endif
258fd6ce8f6Sbellard 
/* Register a new virtual CPU: append 'env' to the global first_cpu
   list and give it the next free cpu_index.  The very first call also
   performs the one-time global initialization (page tables, I/O
   memory, code buffer). */
void cpu_exec_init(CPUState *env)
{
    CPUState **penv;
    int cpu_index;

    if (!code_gen_ptr) {
        /* first CPU ever: initialize the translator and memory layers */
        code_gen_ptr = code_gen_buffer;
        page_init();
        io_mem_init();
    }
    /* walk to the tail of the singly linked CPU list, counting
       existing CPUs to derive the new index */
    env->next_cpu = NULL;
    penv = &first_cpu;
    cpu_index = 0;
    while (*penv != NULL) {
        penv = (CPUState **)&(*penv)->next_cpu;
        cpu_index++;
    }
    env->cpu_index = cpu_index;
    *penv = env;
}
279fd6ce8f6Sbellard 
2809fa3e853Sbellard static inline void invalidate_page_bitmap(PageDesc *p)
2819fa3e853Sbellard {
2829fa3e853Sbellard     if (p->code_bitmap) {
28359817ccbSbellard         qemu_free(p->code_bitmap);
2849fa3e853Sbellard         p->code_bitmap = NULL;
2859fa3e853Sbellard     }
2869fa3e853Sbellard     p->code_write_count = 0;
2879fa3e853Sbellard }
2889fa3e853Sbellard 
289fd6ce8f6Sbellard /* set to NULL all the 'first_tb' fields in all PageDescs */
290fd6ce8f6Sbellard static void page_flush_tb(void)
291fd6ce8f6Sbellard {
292fd6ce8f6Sbellard     int i, j;
293fd6ce8f6Sbellard     PageDesc *p;
294fd6ce8f6Sbellard 
295fd6ce8f6Sbellard     for(i = 0; i < L1_SIZE; i++) {
296fd6ce8f6Sbellard         p = l1_map[i];
297fd6ce8f6Sbellard         if (p) {
2989fa3e853Sbellard             for(j = 0; j < L2_SIZE; j++) {
2999fa3e853Sbellard                 p->first_tb = NULL;
3009fa3e853Sbellard                 invalidate_page_bitmap(p);
3019fa3e853Sbellard                 p++;
3029fa3e853Sbellard             }
303fd6ce8f6Sbellard         }
304fd6ce8f6Sbellard     }
305fd6ce8f6Sbellard }
306fd6ce8f6Sbellard 
/* flush all the translation blocks */
/* XXX: tb_flush is currently not thread safe */
void tb_flush(CPUState *env1)
{
    CPUState *env;
#if defined(DEBUG_FLUSH)
    printf("qemu: flush code_size=%d nb_tbs=%d avg_tb_size=%d\n",
           code_gen_ptr - code_gen_buffer,
           nb_tbs,
           nb_tbs > 0 ? (code_gen_ptr - code_gen_buffer) / nb_tbs : 0);
#endif
    nb_tbs = 0;

    /* clear each CPU's virtual PC -> TB lookup cache */
    for(env = first_cpu; env != NULL; env = env->next_cpu) {
        memset (env->tb_jmp_cache, 0, TB_JMP_CACHE_SIZE * sizeof (void *));
    }

    /* empty the physical hash table and the per-page TB lists */
    memset (tb_phys_hash, 0, CODE_GEN_PHYS_HASH_SIZE * sizeof (void *));
    page_flush_tb();

    /* all generated code may now be overwritten */
    code_gen_ptr = code_gen_buffer;
    /* XXX: flush processor icache at this point if cache flush is
       expensive */
    tb_flush_count++;
}
332fd6ce8f6Sbellard 
333fd6ce8f6Sbellard #ifdef DEBUG_TB_CHECK
334fd6ce8f6Sbellard 
335fd6ce8f6Sbellard static void tb_invalidate_check(unsigned long address)
336fd6ce8f6Sbellard {
337fd6ce8f6Sbellard     TranslationBlock *tb;
338fd6ce8f6Sbellard     int i;
339fd6ce8f6Sbellard     address &= TARGET_PAGE_MASK;
34099773bd4Spbrook     for(i = 0;i < CODE_GEN_PHYS_HASH_SIZE; i++) {
34199773bd4Spbrook         for(tb = tb_phys_hash[i]; tb != NULL; tb = tb->phys_hash_next) {
342fd6ce8f6Sbellard             if (!(address + TARGET_PAGE_SIZE <= tb->pc ||
343fd6ce8f6Sbellard                   address >= tb->pc + tb->size)) {
344fd6ce8f6Sbellard                 printf("ERROR invalidate: address=%08lx PC=%08lx size=%04x\n",
34599773bd4Spbrook                        address, (long)tb->pc, tb->size);
346fd6ce8f6Sbellard             }
347fd6ce8f6Sbellard         }
348fd6ce8f6Sbellard     }
349fd6ce8f6Sbellard }
350fd6ce8f6Sbellard 
351fd6ce8f6Sbellard /* verify that all the pages have correct rights for code */
352fd6ce8f6Sbellard static void tb_page_check(void)
353fd6ce8f6Sbellard {
354fd6ce8f6Sbellard     TranslationBlock *tb;
355fd6ce8f6Sbellard     int i, flags1, flags2;
356fd6ce8f6Sbellard 
35799773bd4Spbrook     for(i = 0;i < CODE_GEN_PHYS_HASH_SIZE; i++) {
35899773bd4Spbrook         for(tb = tb_phys_hash[i]; tb != NULL; tb = tb->phys_hash_next) {
359fd6ce8f6Sbellard             flags1 = page_get_flags(tb->pc);
360fd6ce8f6Sbellard             flags2 = page_get_flags(tb->pc + tb->size - 1);
361fd6ce8f6Sbellard             if ((flags1 & PAGE_WRITE) || (flags2 & PAGE_WRITE)) {
362fd6ce8f6Sbellard                 printf("ERROR page flags: PC=%08lx size=%04x f1=%x f2=%x\n",
36399773bd4Spbrook                        (long)tb->pc, tb->size, flags1, flags2);
364fd6ce8f6Sbellard             }
365fd6ce8f6Sbellard         }
366fd6ce8f6Sbellard     }
367fd6ce8f6Sbellard }
368fd6ce8f6Sbellard 
/* Debug helper: walk the circular list of TBs that jump into 'tb' and
   verify that it terminates back at 'tb' itself. */
void tb_jmp_check(TranslationBlock *tb)
{
    TranslationBlock *tb1;
    unsigned int n1;

    /* walk the list of jumps to this TB */
    tb1 = tb->jmp_first;
    for(;;) {
        /* the low 2 bits of each link encode the jump slot used by the
           linking TB; the tag value 2 marks the end of the list */
        n1 = (long)tb1 & 3;
        tb1 = (TranslationBlock *)((long)tb1 & ~3);
        if (n1 == 2)
            break;
        tb1 = tb1->jmp_next[n1];
    }
    /* check end of list */
    if (tb1 != tb) {
        printf("ERROR: jmp_list from 0x%08lx\n", (long)tb);
    }
}
388d4e8164fSbellard 
389fd6ce8f6Sbellard #endif
390fd6ce8f6Sbellard 
391fd6ce8f6Sbellard /* invalidate one TB */
392fd6ce8f6Sbellard static inline void tb_remove(TranslationBlock **ptb, TranslationBlock *tb,
393fd6ce8f6Sbellard                              int next_offset)
394fd6ce8f6Sbellard {
395fd6ce8f6Sbellard     TranslationBlock *tb1;
396fd6ce8f6Sbellard     for(;;) {
397fd6ce8f6Sbellard         tb1 = *ptb;
398fd6ce8f6Sbellard         if (tb1 == tb) {
399fd6ce8f6Sbellard             *ptb = *(TranslationBlock **)((char *)tb1 + next_offset);
400fd6ce8f6Sbellard             break;
401fd6ce8f6Sbellard         }
402fd6ce8f6Sbellard         ptb = (TranslationBlock **)((char *)tb1 + next_offset);
403fd6ce8f6Sbellard     }
404fd6ce8f6Sbellard }
405fd6ce8f6Sbellard 
/* Unlink 'tb' from a per-page TB list headed at *ptb.  Each list
   pointer carries the page slot number (0 or 1) of the pointed-to TB
   in its low 2 bits. */
static inline void tb_page_remove(TranslationBlock **ptb, TranslationBlock *tb)
{
    TranslationBlock *tb1;
    unsigned int n1;

    for(;;) {
        tb1 = *ptb;
        /* strip the tag bits to recover the real pointer */
        n1 = (long)tb1 & 3;
        tb1 = (TranslationBlock *)((long)tb1 & ~3);
        if (tb1 == tb) {
            *ptb = tb1->page_next[n1];
            break;
        }
        ptb = &tb1->page_next[n1];
    }
}
4229fa3e853Sbellard 
/* Remove the jump entry 'n' of 'tb' from the circular list of TBs
   jumping into its target.  List links are tagged in their low 2 bits
   with the jump slot of the linking TB; tag value 2 marks the head. */
static inline void tb_jmp_remove(TranslationBlock *tb, int n)
{
    TranslationBlock *tb1, **ptb;
    unsigned int n1;

    ptb = &tb->jmp_next[n];
    tb1 = *ptb;
    if (tb1) {
        /* find tb(n) in circular list */
        for(;;) {
            tb1 = *ptb;
            n1 = (long)tb1 & 3;
            tb1 = (TranslationBlock *)((long)tb1 & ~3);
            if (n1 == n && tb1 == tb)
                break;
            if (n1 == 2) {
                /* reached the target TB itself: continue via its
                   incoming-jump list head */
                ptb = &tb1->jmp_first;
            } else {
                ptb = &tb1->jmp_next[n1];
            }
        }
        /* now we can suppress tb(n) from the list */
        *ptb = tb->jmp_next[n];

        tb->jmp_next[n] = NULL;
    }
}
450d4e8164fSbellard 
/* reset the jump entry 'n' of a TB so that it is not chained to
   another TB */
static inline void tb_reset_jump(TranslationBlock *tb, int n)
{
    /* redirect the jump back into the TB's own generated code */
    tb_set_jmp_target(tb, n, (unsigned long)(tb->tc_ptr + tb->tb_next_offset[n]));
}
457d4e8164fSbellard 
/* Remove 'tb' from every data structure referencing it: the physical
   hash table, the per-page TB lists (except the page at 'page_addr',
   which the caller handles), the per-CPU jump caches and the jump
   chains.  Any TB that chained into 'tb' is reset to exit normally. */
static inline void tb_phys_invalidate(TranslationBlock *tb, unsigned int page_addr)
{
    CPUState *env;
    PageDesc *p;
    unsigned int h, n1;
    target_ulong phys_pc;
    TranslationBlock *tb1, *tb2;

    /* remove the TB from the hash list */
    phys_pc = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
    h = tb_phys_hash_func(phys_pc);
    tb_remove(&tb_phys_hash[h], tb,
              offsetof(TranslationBlock, phys_hash_next));

    /* remove the TB from the page list */
    if (tb->page_addr[0] != page_addr) {
        p = page_find(tb->page_addr[0] >> TARGET_PAGE_BITS);
        tb_page_remove(&p->first_tb, tb);
        invalidate_page_bitmap(p);
    }
    /* page_addr[1] is -1 when the TB does not span two pages */
    if (tb->page_addr[1] != -1 && tb->page_addr[1] != page_addr) {
        p = page_find(tb->page_addr[1] >> TARGET_PAGE_BITS);
        tb_page_remove(&p->first_tb, tb);
        invalidate_page_bitmap(p);
    }

    tb_invalidated_flag = 1;

    /* remove the TB from the per-CPU virtual PC lookup caches */
    h = tb_jmp_cache_hash_func(tb->pc);
    for(env = first_cpu; env != NULL; env = env->next_cpu) {
        if (env->tb_jmp_cache[h] == tb)
            env->tb_jmp_cache[h] = NULL;
    }

    /* suppress this TB from the two jump lists */
    tb_jmp_remove(tb, 0);
    tb_jmp_remove(tb, 1);

    /* suppress any remaining jumps to this TB */
    tb1 = tb->jmp_first;
    for(;;) {
        /* low 2 bits encode the jump slot; 2 means end of list */
        n1 = (long)tb1 & 3;
        if (n1 == 2)
            break;
        tb1 = (TranslationBlock *)((long)tb1 & ~3);
        tb2 = tb1->jmp_next[n1];
        tb_reset_jump(tb1, n1);
        tb1->jmp_next[n1] = NULL;
        tb1 = tb2;
    }
    tb->jmp_first = (TranslationBlock *)((long)tb | 2); /* fail safe */

    tb_phys_invalidate_count++;
}
5139fa3e853Sbellard 
/* Set bits [start, start + len) in the bitmap 'tab'; bit 0 is the
   least significant bit of tab[0]. */
static inline void set_bits(uint8_t *tab, int start, int len)
{
    int bit;
    int end;

    end = start + len;
    /* simple bit-at-a-time form; the bitmap covers a single target
       page, so the range is small */
    for (bit = start; bit < end; bit++) {
        tab[bit >> 3] |= 1 << (bit & 7);
    }
}
5409fa3e853Sbellard 
/* Build the bitmap marking which bytes of the page are covered by
   translated code, by walking the page's TB list.  Used to decide
   whether a write to the page actually modifies code. */
static void build_page_bitmap(PageDesc *p)
{
    int n, tb_start, tb_end;
    TranslationBlock *tb;

    /* one bit per byte of the target page */
    p->code_bitmap = qemu_malloc(TARGET_PAGE_SIZE / 8);
    if (!p->code_bitmap)
        return;
    memset(p->code_bitmap, 0, TARGET_PAGE_SIZE / 8);

    tb = p->first_tb;
    while (tb != NULL) {
        /* the low 2 bits of the list pointer give the page slot */
        n = (long)tb & 3;
        tb = (TranslationBlock *)((long)tb & ~3);
        /* NOTE: this is subtle as a TB may span two physical pages */
        if (n == 0) {
            /* NOTE: tb_end may be after the end of the page, but
               it is not a problem */
            tb_start = tb->pc & ~TARGET_PAGE_MASK;
            tb_end = tb_start + tb->size;
            if (tb_end > TARGET_PAGE_SIZE)
                tb_end = TARGET_PAGE_SIZE;
        } else {
            /* second page of a spanning TB: code starts at offset 0 */
            tb_start = 0;
            tb_end = ((tb->pc + tb->size) & ~TARGET_PAGE_MASK);
        }
        set_bits(p->code_bitmap, tb_start, tb_end - tb_start);
        tb = tb->page_next[n];
    }
}
5719fa3e853Sbellard 
572d720b93dSbellard #ifdef TARGET_HAS_PRECISE_SMC
573d720b93dSbellard 
/* Translate a new TB for 'pc' with the given flags and register it.
   Only compiled when TARGET_HAS_PRECISE_SMC is defined. */
static void tb_gen_code(CPUState *env,
                        target_ulong pc, target_ulong cs_base, int flags,
                        int cflags)
{
    TranslationBlock *tb;
    uint8_t *tc_ptr;
    target_ulong phys_pc, phys_page2, virt_page2;
    int code_gen_size;

    phys_pc = get_phys_addr_code(env, pc);
    tb = tb_alloc(pc);
    if (!tb) {
        /* flush must be done */
        tb_flush(env);
        /* cannot fail at this point */
        tb = tb_alloc(pc);
    }
    tc_ptr = code_gen_ptr;
    tb->tc_ptr = tc_ptr;
    tb->cs_base = cs_base;
    tb->flags = flags;
    tb->cflags = cflags;
    cpu_gen_code(env, tb, CODE_GEN_MAX_SIZE, &code_gen_size);
    /* advance the generation pointer, rounded up to CODE_GEN_ALIGN */
    code_gen_ptr = (void *)(((unsigned long)code_gen_ptr + code_gen_size + CODE_GEN_ALIGN - 1) & ~(CODE_GEN_ALIGN - 1));

    /* check next page if needed */
    virt_page2 = (pc + tb->size - 1) & TARGET_PAGE_MASK;
    phys_page2 = -1;
    if ((pc & TARGET_PAGE_MASK) != virt_page2) {
        /* the TB spills into a second page: record its physical page */
        phys_page2 = get_phys_addr_code(env, virt_page2);
    }
    tb_link_phys(tb, phys_pc, phys_page2);
}
607d720b93dSbellard #endif
608d720b93dSbellard 
6099fa3e853Sbellard /* invalidate all TBs which intersect with the target physical page
6109fa3e853Sbellard    starting in range [start;end[. NOTE: start and end must refer to
611d720b93dSbellard    the same physical page. 'is_cpu_write_access' should be true if called
612d720b93dSbellard    from a real cpu write access: the virtual CPU will exit the current
613d720b93dSbellard    TB if code is modified inside this TB. */
614d720b93dSbellard void tb_invalidate_phys_page_range(target_ulong start, target_ulong end,
615d720b93dSbellard                                    int is_cpu_write_access)
6169fa3e853Sbellard {
617d720b93dSbellard     int n, current_tb_modified, current_tb_not_found, current_flags;
618d720b93dSbellard     CPUState *env = cpu_single_env;
6199fa3e853Sbellard     PageDesc *p;
620ea1c1802Sbellard     TranslationBlock *tb, *tb_next, *current_tb, *saved_tb;
6219fa3e853Sbellard     target_ulong tb_start, tb_end;
622d720b93dSbellard     target_ulong current_pc, current_cs_base;
6239fa3e853Sbellard 
6249fa3e853Sbellard     p = page_find(start >> TARGET_PAGE_BITS);
6259fa3e853Sbellard     if (!p)
6269fa3e853Sbellard         return;
6279fa3e853Sbellard     if (!p->code_bitmap &&
628d720b93dSbellard         ++p->code_write_count >= SMC_BITMAP_USE_THRESHOLD &&
629d720b93dSbellard         is_cpu_write_access) {
6309fa3e853Sbellard         /* build code bitmap */
6319fa3e853Sbellard         build_page_bitmap(p);
6329fa3e853Sbellard     }
6339fa3e853Sbellard 
6349fa3e853Sbellard     /* we remove all the TBs in the range [start, end[ */
6359fa3e853Sbellard     /* XXX: see if in some cases it could be faster to invalidate all the code */
636d720b93dSbellard     current_tb_not_found = is_cpu_write_access;
637d720b93dSbellard     current_tb_modified = 0;
638d720b93dSbellard     current_tb = NULL; /* avoid warning */
639d720b93dSbellard     current_pc = 0; /* avoid warning */
640d720b93dSbellard     current_cs_base = 0; /* avoid warning */
641d720b93dSbellard     current_flags = 0; /* avoid warning */
6429fa3e853Sbellard     tb = p->first_tb;
6439fa3e853Sbellard     while (tb != NULL) {
6449fa3e853Sbellard         n = (long)tb & 3;
6459fa3e853Sbellard         tb = (TranslationBlock *)((long)tb & ~3);
6469fa3e853Sbellard         tb_next = tb->page_next[n];
6479fa3e853Sbellard         /* NOTE: this is subtle as a TB may span two physical pages */
6489fa3e853Sbellard         if (n == 0) {
6499fa3e853Sbellard             /* NOTE: tb_end may be after the end of the page, but
6509fa3e853Sbellard                it is not a problem */
6519fa3e853Sbellard             tb_start = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
6529fa3e853Sbellard             tb_end = tb_start + tb->size;
6539fa3e853Sbellard         } else {
6549fa3e853Sbellard             tb_start = tb->page_addr[1];
6559fa3e853Sbellard             tb_end = tb_start + ((tb->pc + tb->size) & ~TARGET_PAGE_MASK);
6569fa3e853Sbellard         }
6579fa3e853Sbellard         if (!(tb_end <= start || tb_start >= end)) {
658d720b93dSbellard #ifdef TARGET_HAS_PRECISE_SMC
659d720b93dSbellard             if (current_tb_not_found) {
660d720b93dSbellard                 current_tb_not_found = 0;
661d720b93dSbellard                 current_tb = NULL;
662d720b93dSbellard                 if (env->mem_write_pc) {
663d720b93dSbellard                     /* now we have a real cpu fault */
664d720b93dSbellard                     current_tb = tb_find_pc(env->mem_write_pc);
665d720b93dSbellard                 }
666d720b93dSbellard             }
667d720b93dSbellard             if (current_tb == tb &&
668d720b93dSbellard                 !(current_tb->cflags & CF_SINGLE_INSN)) {
669d720b93dSbellard                 /* If we are modifying the current TB, we must stop
670d720b93dSbellard                 its execution. We could be more precise by checking
671d720b93dSbellard                 that the modification is after the current PC, but it
672d720b93dSbellard                 would require a specialized function to partially
673d720b93dSbellard                 restore the CPU state */
674d720b93dSbellard 
675d720b93dSbellard                 current_tb_modified = 1;
676d720b93dSbellard                 cpu_restore_state(current_tb, env,
677d720b93dSbellard                                   env->mem_write_pc, NULL);
678d720b93dSbellard #if defined(TARGET_I386)
679d720b93dSbellard                 current_flags = env->hflags;
680d720b93dSbellard                 current_flags |= (env->eflags & (IOPL_MASK | TF_MASK | VM_MASK));
681d720b93dSbellard                 current_cs_base = (target_ulong)env->segs[R_CS].base;
682d720b93dSbellard                 current_pc = current_cs_base + env->eip;
683d720b93dSbellard #else
684d720b93dSbellard #error unsupported CPU
685d720b93dSbellard #endif
686d720b93dSbellard             }
687d720b93dSbellard #endif /* TARGET_HAS_PRECISE_SMC */
6886f5a9f7eSbellard             /* we need to do that to handle the case where a signal
6896f5a9f7eSbellard                occurs while doing tb_phys_invalidate() */
6906f5a9f7eSbellard             saved_tb = NULL;
6916f5a9f7eSbellard             if (env) {
692ea1c1802Sbellard                 saved_tb = env->current_tb;
693ea1c1802Sbellard                 env->current_tb = NULL;
6946f5a9f7eSbellard             }
6959fa3e853Sbellard             tb_phys_invalidate(tb, -1);
6966f5a9f7eSbellard             if (env) {
697ea1c1802Sbellard                 env->current_tb = saved_tb;
698ea1c1802Sbellard                 if (env->interrupt_request && env->current_tb)
699ea1c1802Sbellard                     cpu_interrupt(env, env->interrupt_request);
7009fa3e853Sbellard             }
7016f5a9f7eSbellard         }
7029fa3e853Sbellard         tb = tb_next;
7039fa3e853Sbellard     }
7049fa3e853Sbellard #if !defined(CONFIG_USER_ONLY)
7059fa3e853Sbellard     /* if no code remaining, no need to continue to use slow writes */
7069fa3e853Sbellard     if (!p->first_tb) {
7079fa3e853Sbellard         invalidate_page_bitmap(p);
708d720b93dSbellard         if (is_cpu_write_access) {
709d720b93dSbellard             tlb_unprotect_code_phys(env, start, env->mem_write_vaddr);
710d720b93dSbellard         }
711d720b93dSbellard     }
712d720b93dSbellard #endif
713d720b93dSbellard #ifdef TARGET_HAS_PRECISE_SMC
714d720b93dSbellard     if (current_tb_modified) {
715d720b93dSbellard         /* we generate a block containing just the instruction
716d720b93dSbellard            modifying the memory. It will ensure that it cannot modify
717d720b93dSbellard            itself */
718ea1c1802Sbellard         env->current_tb = NULL;
719d720b93dSbellard         tb_gen_code(env, current_pc, current_cs_base, current_flags,
720d720b93dSbellard                     CF_SINGLE_INSN);
721d720b93dSbellard         cpu_resume_from_signal(env, NULL);
7229fa3e853Sbellard     }
7239fa3e853Sbellard #endif
7249fa3e853Sbellard }
7259fa3e853Sbellard 
/* Invalidate any TBs overlapping the write [start, start+len) using the
   page's code bitmap, when present, to skip writes that cannot touch
   translated code.  len must be <= 8 and start must be a multiple of len,
   so the tested bits never straddle a bitmap byte. */
static inline void tb_invalidate_phys_page_fast(target_ulong start, int len)
{
    PageDesc *p;
    int offset, b;
#if 0
    if (1) {
        if (loglevel) {
            fprintf(logfile, "modifying code at 0x%x size=%d EIP=%x PC=%08x\n",
                   cpu_single_env->mem_write_vaddr, len,
                   cpu_single_env->eip,
                   cpu_single_env->eip + (long)cpu_single_env->segs[R_CS].base);
        }
    }
#endif
    p = page_find(start >> TARGET_PAGE_BITS);
    if (!p)
        return;
    if (p->code_bitmap) {
        /* one bitmap bit per page byte: test the len bits covering the
           written range; only invalidate if code is actually there */
        offset = start & ~TARGET_PAGE_MASK;
        b = p->code_bitmap[offset >> 3] >> (offset & 7);
        if (b & ((1 << len) - 1))
            goto do_invalidate;
    } else {
        /* no bitmap built yet: conservatively assume code is present */
    do_invalidate:
        tb_invalidate_phys_page_range(start, start + len, 1);
    }
}
7549fa3e853Sbellard 
#if !defined(CONFIG_SOFTMMU)
/* Invalidate every TB on the page containing 'addr' (no-softmmu build).
   Called from the write-protection fault path: 'pc' is the host pc of the
   faulting write (0 if unknown) and 'puc' the signal context; both are
   forwarded to cpu_restore_state()/cpu_resume_from_signal(). */
static void tb_invalidate_phys_page(target_ulong addr,
                                    unsigned long pc, void *puc)
{
    int n, current_flags, current_tb_modified;
    target_ulong current_pc, current_cs_base;
    PageDesc *p;
    TranslationBlock *tb, *current_tb;
#ifdef TARGET_HAS_PRECISE_SMC
    CPUState *env = cpu_single_env;
#endif

    addr &= TARGET_PAGE_MASK;
    p = page_find(addr >> TARGET_PAGE_BITS);
    if (!p)
        return;
    tb = p->first_tb;
    current_tb_modified = 0;
    current_tb = NULL;
    current_pc = 0; /* avoid warning */
    current_cs_base = 0; /* avoid warning */
    current_flags = 0; /* avoid warning */
#ifdef TARGET_HAS_PRECISE_SMC
    if (tb && pc != 0) {
        /* locate the TB we were executing when the fault happened */
        current_tb = tb_find_pc(pc);
    }
#endif
    while (tb != NULL) {
        /* the low 2 bits of each list pointer encode which of the TB's
           two pages this list entry belongs to */
        n = (long)tb & 3;
        tb = (TranslationBlock *)((long)tb & ~3);
#ifdef TARGET_HAS_PRECISE_SMC
        if (current_tb == tb &&
            !(current_tb->cflags & CF_SINGLE_INSN)) {
                /* If we are modifying the current TB, we must stop
                   its execution. We could be more precise by checking
                   that the modification is after the current PC, but it
                   would require a specialized function to partially
                   restore the CPU state */

            current_tb_modified = 1;
            cpu_restore_state(current_tb, env, pc, puc);
#if defined(TARGET_I386)
            current_flags = env->hflags;
            current_flags |= (env->eflags & (IOPL_MASK | TF_MASK | VM_MASK));
            current_cs_base = (target_ulong)env->segs[R_CS].base;
            current_pc = current_cs_base + env->eip;
#else
#error unsupported CPU
#endif
        }
#endif /* TARGET_HAS_PRECISE_SMC */
        tb_phys_invalidate(tb, addr);
        tb = tb->page_next[n];
    }
    p->first_tb = NULL;
#ifdef TARGET_HAS_PRECISE_SMC
    if (current_tb_modified) {
        /* we generate a block containing just the instruction
           modifying the memory. It will ensure that it cannot modify
           itself */
        env->current_tb = NULL;
        tb_gen_code(env, current_pc, current_cs_base, current_flags,
                    CF_SINGLE_INSN);
        /* does not return: restarts execution from the signal context */
        cpu_resume_from_signal(env, puc);
    }
#endif
}
#endif
823fd6ce8f6Sbellard 
/* Add TB 'tb' to the list of page 'page_addr' as its n-th page (n is 0 or
   1, since a TB may span two target pages) and write-protect the page if
   it holds its first TB, so self-modifying code is detected. */
static inline void tb_alloc_page(TranslationBlock *tb,
                                 unsigned int n, target_ulong page_addr)
{
    PageDesc *p;
    TranslationBlock *last_first_tb;

    tb->page_addr[n] = page_addr;
    p = page_find_alloc(page_addr >> TARGET_PAGE_BITS);
    /* link at the head of the page's TB list; the pointer's low bits
       record which of the TB's two pages this entry is for */
    tb->page_next[n] = p->first_tb;
    last_first_tb = p->first_tb;
    p->first_tb = (TranslationBlock *)((long)tb | n);
    invalidate_page_bitmap(p);

#if defined(TARGET_HAS_SMC) || 1

#if defined(CONFIG_USER_ONLY)
    if (p->flags & PAGE_WRITE) {
        target_ulong addr;
        PageDesc *p2;
        int prot;

        /* force the host page as non writable (writes will have a
           page fault + mprotect overhead) */
        page_addr &= qemu_host_page_mask;
        prot = 0;
        /* a host page may cover several target pages: accumulate the
           flags of each and clear their PAGE_WRITE bit */
        for(addr = page_addr; addr < page_addr + qemu_host_page_size;
            addr += TARGET_PAGE_SIZE) {

            p2 = page_find (addr >> TARGET_PAGE_BITS);
            if (!p2)
                continue;
            prot |= p2->flags;
            p2->flags &= ~PAGE_WRITE;
            page_get_flags(addr);
          }
        mprotect(g2h(page_addr), qemu_host_page_size,
                 (prot & PAGE_BITS) & ~PAGE_WRITE);
#ifdef DEBUG_TB_INVALIDATE
        printf("protecting code page: 0x%08lx\n",
               page_addr);
#endif
    }
#else
    /* if some code is already present, then the pages are already
       protected. So we handle the case where only the first TB is
       allocated in a physical page */
    if (!last_first_tb) {
        tlb_protect_code(page_addr);
    }
#endif

#endif /* TARGET_HAS_SMC */
}
878fd6ce8f6Sbellard 
879fd6ce8f6Sbellard /* Allocate a new translation block. Flush the translation buffer if
880fd6ce8f6Sbellard    too many translation blocks or too much generated code. */
881c27004ecSbellard TranslationBlock *tb_alloc(target_ulong pc)
882fd6ce8f6Sbellard {
883fd6ce8f6Sbellard     TranslationBlock *tb;
884fd6ce8f6Sbellard 
885fd6ce8f6Sbellard     if (nb_tbs >= CODE_GEN_MAX_BLOCKS ||
886fd6ce8f6Sbellard         (code_gen_ptr - code_gen_buffer) >= CODE_GEN_BUFFER_MAX_SIZE)
887d4e8164fSbellard         return NULL;
888fd6ce8f6Sbellard     tb = &tbs[nb_tbs++];
889fd6ce8f6Sbellard     tb->pc = pc;
890b448f2f3Sbellard     tb->cflags = 0;
891d4e8164fSbellard     return tb;
892d4e8164fSbellard }
893d4e8164fSbellard 
/* add a new TB and link it to the physical page tables. phys_page2 is
   (-1) to indicate that only one page contains the TB. */
void tb_link_phys(TranslationBlock *tb,
                  target_ulong phys_pc, target_ulong phys_page2)
{
    unsigned int h;
    TranslationBlock **ptb;

    /* add in the physical hash table */
    h = tb_phys_hash_func(phys_pc);
    ptb = &tb_phys_hash[h];
    tb->phys_hash_next = *ptb;
    *ptb = tb;

    /* add in the page list */
    tb_alloc_page(tb, 0, phys_pc & TARGET_PAGE_MASK);
    if (phys_page2 != -1)
        tb_alloc_page(tb, 1, phys_page2);
    else
        tb->page_addr[1] = -1;

    /* the TB is its own jump-list head; tag bit 2 marks the head of
       the circular jmp list */
    tb->jmp_first = (TranslationBlock *)((long)tb | 2);
    tb->jmp_next[0] = NULL;
    tb->jmp_next[1] = NULL;
#ifdef USE_CODE_COPY
    tb->cflags &= ~CF_FP_USED;
    if (tb->cflags & CF_TB_FP_USED)
        tb->cflags |= CF_FP_USED;
#endif

    /* init original jump addresses (0xffff means "no direct jump") */
    if (tb->tb_next_offset[0] != 0xffff)
        tb_reset_jump(tb, 0);
    if (tb->tb_next_offset[1] != 0xffff)
        tb_reset_jump(tb, 1);

#ifdef DEBUG_TB_CHECK
    tb_page_check();
#endif
}
934fd6ce8f6Sbellard 
935a513fe19Sbellard /* find the TB 'tb' such that tb[0].tc_ptr <= tc_ptr <
936a513fe19Sbellard    tb[1].tc_ptr. Return NULL if not found */
937a513fe19Sbellard TranslationBlock *tb_find_pc(unsigned long tc_ptr)
938a513fe19Sbellard {
939a513fe19Sbellard     int m_min, m_max, m;
940a513fe19Sbellard     unsigned long v;
941a513fe19Sbellard     TranslationBlock *tb;
942a513fe19Sbellard 
943a513fe19Sbellard     if (nb_tbs <= 0)
944a513fe19Sbellard         return NULL;
945a513fe19Sbellard     if (tc_ptr < (unsigned long)code_gen_buffer ||
946a513fe19Sbellard         tc_ptr >= (unsigned long)code_gen_ptr)
947a513fe19Sbellard         return NULL;
948a513fe19Sbellard     /* binary search (cf Knuth) */
949a513fe19Sbellard     m_min = 0;
950a513fe19Sbellard     m_max = nb_tbs - 1;
951a513fe19Sbellard     while (m_min <= m_max) {
952a513fe19Sbellard         m = (m_min + m_max) >> 1;
953a513fe19Sbellard         tb = &tbs[m];
954a513fe19Sbellard         v = (unsigned long)tb->tc_ptr;
955a513fe19Sbellard         if (v == tc_ptr)
956a513fe19Sbellard             return tb;
957a513fe19Sbellard         else if (tc_ptr < v) {
958a513fe19Sbellard             m_max = m - 1;
959a513fe19Sbellard         } else {
960a513fe19Sbellard             m_min = m + 1;
961a513fe19Sbellard         }
962a513fe19Sbellard     }
963a513fe19Sbellard     return &tbs[m_max];
964a513fe19Sbellard }
9657501267eSbellard 
966ea041c0eSbellard static void tb_reset_jump_recursive(TranslationBlock *tb);
967ea041c0eSbellard 
/* Undo the direct jump number n (0 or 1) of 'tb': unlink tb from the
   jump list of its target and patch the generated code back to the
   non-chained form, then recurse into the target so TBs that could be
   reached through it are unchained as well. */
static inline void tb_reset_jump_recursive2(TranslationBlock *tb, int n)
{
    TranslationBlock *tb1, *tb_next, **ptb;
    unsigned int n1;

    tb1 = tb->jmp_next[n];
    if (tb1 != NULL) {
        /* find head of list; the low 2 bits of each pointer tag the
           slot, and tag 2 marks the list head (the target TB itself) */
        for(;;) {
            n1 = (long)tb1 & 3;
            tb1 = (TranslationBlock *)((long)tb1 & ~3);
            if (n1 == 2)
                break;
            tb1 = tb1->jmp_next[n1];
        }
        /* we are now sure now that tb jumps to tb1 */
        tb_next = tb1;

        /* remove tb from the jmp_first list */
        ptb = &tb_next->jmp_first;
        for(;;) {
            tb1 = *ptb;
            n1 = (long)tb1 & 3;
            tb1 = (TranslationBlock *)((long)tb1 & ~3);
            if (n1 == n && tb1 == tb)
                break;
            ptb = &tb1->jmp_next[n1];
        }
        *ptb = tb->jmp_next[n];
        tb->jmp_next[n] = NULL;

        /* suppress the jump to next tb in generated code */
        tb_reset_jump(tb, n);

        /* suppress jumps in the tb on which we could have jumped */
        tb_reset_jump_recursive(tb_next);
    }
}
1006ea041c0eSbellard 
/* unchain both outgoing direct jumps of 'tb' (and, transitively, of
   any TB reachable through them) */
static void tb_reset_jump_recursive(TranslationBlock *tb)
{
    tb_reset_jump_recursive2(tb, 0);
    tb_reset_jump_recursive2(tb, 1);
}
1012ea041c0eSbellard 
#if defined(TARGET_HAS_ICE)
/* Invalidate the translated code containing guest virtual pc so that a
   newly inserted/removed breakpoint takes effect on the next execution. */
static void breakpoint_invalidate(CPUState *env, target_ulong pc)
{
    target_ulong addr, pd;
    ram_addr_t ram_addr;
    PhysPageDesc *p;

    /* translate the guest virtual pc to a physical page */
    addr = cpu_get_phys_page_debug(env, pc);
    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    if (!p) {
        pd = IO_MEM_UNASSIGNED;
    } else {
        pd = p->phys_offset;
    }
    ram_addr = (pd & TARGET_PAGE_MASK) | (pc & ~TARGET_PAGE_MASK);
    /* one-byte range is enough: any TB containing this address is hit */
    tb_invalidate_phys_page_range(ram_addr, ram_addr + 1, 0);
}
#endif
1031d720b93dSbellard 
1032c33a346eSbellard /* add a breakpoint. EXCP_DEBUG is returned by the CPU loop if a
1033c33a346eSbellard    breakpoint is reached */
10342e12669aSbellard int cpu_breakpoint_insert(CPUState *env, target_ulong pc)
10354c3a88a2Sbellard {
10361fddef4bSbellard #if defined(TARGET_HAS_ICE)
10374c3a88a2Sbellard     int i;
10384c3a88a2Sbellard 
10394c3a88a2Sbellard     for(i = 0; i < env->nb_breakpoints; i++) {
10404c3a88a2Sbellard         if (env->breakpoints[i] == pc)
10414c3a88a2Sbellard             return 0;
10424c3a88a2Sbellard     }
10434c3a88a2Sbellard 
10444c3a88a2Sbellard     if (env->nb_breakpoints >= MAX_BREAKPOINTS)
10454c3a88a2Sbellard         return -1;
10464c3a88a2Sbellard     env->breakpoints[env->nb_breakpoints++] = pc;
1047d720b93dSbellard 
1048d720b93dSbellard     breakpoint_invalidate(env, pc);
10494c3a88a2Sbellard     return 0;
10504c3a88a2Sbellard #else
10514c3a88a2Sbellard     return -1;
10524c3a88a2Sbellard #endif
10534c3a88a2Sbellard }
10544c3a88a2Sbellard 
10554c3a88a2Sbellard /* remove a breakpoint */
10562e12669aSbellard int cpu_breakpoint_remove(CPUState *env, target_ulong pc)
10574c3a88a2Sbellard {
10581fddef4bSbellard #if defined(TARGET_HAS_ICE)
10594c3a88a2Sbellard     int i;
10604c3a88a2Sbellard     for(i = 0; i < env->nb_breakpoints; i++) {
10614c3a88a2Sbellard         if (env->breakpoints[i] == pc)
10624c3a88a2Sbellard             goto found;
10634c3a88a2Sbellard     }
10644c3a88a2Sbellard     return -1;
10654c3a88a2Sbellard  found:
10664c3a88a2Sbellard     env->nb_breakpoints--;
10671fddef4bSbellard     if (i < env->nb_breakpoints)
10681fddef4bSbellard       env->breakpoints[i] = env->breakpoints[env->nb_breakpoints];
1069d720b93dSbellard 
1070d720b93dSbellard     breakpoint_invalidate(env, pc);
10714c3a88a2Sbellard     return 0;
10724c3a88a2Sbellard #else
10734c3a88a2Sbellard     return -1;
10744c3a88a2Sbellard #endif
10754c3a88a2Sbellard }
10764c3a88a2Sbellard 
/* enable or disable single step mode. EXCP_DEBUG is returned by the
   CPU loop after each instruction */
void cpu_single_step(CPUState *env, int enabled)
{
#if defined(TARGET_HAS_ICE)
    if (env->singlestep_enabled != enabled) {
        env->singlestep_enabled = enabled;
        /* must flush all the translated code to avoid inconsistencies */
        /* XXX: only flush what is necessary */
        tb_flush(env);
    }
#endif
}
1090c33a346eSbellard 
/* enable or disable low levels log */
void cpu_set_log(int log_flags)
{
    loglevel = log_flags;
    /* lazily open the log file the first time logging is enabled;
       exit hard if the file cannot be created */
    if (loglevel && !logfile) {
        logfile = fopen(logfilename, "w");
        if (!logfile) {
            perror(logfilename);
            _exit(1);
        }
#if !defined(CONFIG_SOFTMMU)
        /* must avoid mmap() usage of glibc by setting a buffer "by hand" */
        {
            static uint8_t logfile_buf[4096];
            setvbuf(logfile, logfile_buf, _IOLBF, sizeof(logfile_buf));
        }
#else
        /* line-buffered so log output appears promptly */
        setvbuf(logfile, NULL, _IOLBF, 0);
#endif
    }
}
111234865134Sbellard 
/* set the file name used the next time the log file is opened.
   NOTE(review): the previous logfilename is not freed, so repeated calls
   leak; however the initial value may be a string literal that must not
   be freed — confirm against the definition site before changing. */
void cpu_set_log_filename(const char *filename)
{
    logfilename = strdup(filename);
}
1117c33a346eSbellard 
/* mask must never be zero, except for A20 change call */
void cpu_interrupt(CPUState *env, int mask)
{
    TranslationBlock *tb;
    static int interrupt_lock;

    env->interrupt_request |= mask;
    /* if the cpu is currently executing code, we must unlink it and
       all the potentially executing TB */
    tb = env->current_tb;
    /* the test-and-set lock prevents two contexts (e.g. a signal handler
       and the main loop) from unchaining the jump lists concurrently; if
       the lock is held, the other context is already doing the unlink */
    if (tb && !testandset(&interrupt_lock)) {
        env->current_tb = NULL;
        tb_reset_jump_recursive(tb);
        interrupt_lock = 0;
    }
}
1134ea041c0eSbellard 
/* clear the given pending interrupt request bit(s) */
void cpu_reset_interrupt(CPUState *env, int mask)
{
    env->interrupt_request &= ~mask;
}
1139b54ad049Sbellard 
/* table of supported log items: mask bit, command-line name, help text;
   terminated by an entry with a zero mask (relied on by
   cpu_str_to_log_mask) */
CPULogItem cpu_log_items[] = {
    { CPU_LOG_TB_OUT_ASM, "out_asm",
      "show generated host assembly code for each compiled TB" },
    { CPU_LOG_TB_IN_ASM, "in_asm",
      "show target assembly code for each compiled TB" },
    { CPU_LOG_TB_OP, "op",
      "show micro ops for each compiled TB (only usable if 'in_asm' used)" },
#ifdef TARGET_I386
    { CPU_LOG_TB_OP_OPT, "op_opt",
      "show micro ops after optimization for each compiled TB" },
#endif
    { CPU_LOG_INT, "int",
      "show interrupts/exceptions in short format" },
    { CPU_LOG_EXEC, "exec",
      "show trace before each executed TB (lots of logs)" },
    { CPU_LOG_TB_CPU, "cpu",
      "show CPU state before bloc translation" },
#ifdef TARGET_I386
    { CPU_LOG_PCALL, "pcall",
      "show protected mode far calls/returns/exceptions" },
#endif
#ifdef DEBUG_IOPORT
    { CPU_LOG_IOPORT, "ioport",
      "show all i/o ports accesses" },
#endif
    { 0, NULL, NULL },
};
1167f193c797Sbellard 
/* return nonzero iff the first n bytes of s1 exactly spell out the
   whole of s2 (i.e. s2 has length n and matches byte for byte) */
static int cmp1(const char *s1, int n, const char *s2)
{
    if (strlen(s2) != (size_t)n) {
        return 0;
    }
    return memcmp(s1, s2, n) == 0;
}
1174f193c797Sbellard 
1175f193c797Sbellard /* takes a comma separated list of log masks. Return 0 if error. */
1176f193c797Sbellard int cpu_str_to_log_mask(const char *str)
1177f193c797Sbellard {
1178f193c797Sbellard     CPULogItem *item;
1179f193c797Sbellard     int mask;
1180f193c797Sbellard     const char *p, *p1;
1181f193c797Sbellard 
1182f193c797Sbellard     p = str;
1183f193c797Sbellard     mask = 0;
1184f193c797Sbellard     for(;;) {
1185f193c797Sbellard         p1 = strchr(p, ',');
1186f193c797Sbellard         if (!p1)
1187f193c797Sbellard             p1 = p + strlen(p);
11888e3a9fd2Sbellard 	if(cmp1(p,p1-p,"all")) {
11898e3a9fd2Sbellard 		for(item = cpu_log_items; item->mask != 0; item++) {
11908e3a9fd2Sbellard 			mask |= item->mask;
11918e3a9fd2Sbellard 		}
11928e3a9fd2Sbellard 	} else {
1193f193c797Sbellard         for(item = cpu_log_items; item->mask != 0; item++) {
1194f193c797Sbellard             if (cmp1(p, p1 - p, item->name))
1195f193c797Sbellard                 goto found;
1196f193c797Sbellard         }
1197f193c797Sbellard         return 0;
11988e3a9fd2Sbellard 	}
1199f193c797Sbellard     found:
1200f193c797Sbellard         mask |= item->mask;
1201f193c797Sbellard         if (*p1 != ',')
1202f193c797Sbellard             break;
1203f193c797Sbellard         p = p1 + 1;
1204f193c797Sbellard     }
1205f193c797Sbellard     return mask;
1206f193c797Sbellard }
1207ea041c0eSbellard 
/* print a fatal error message (printf-style) plus the full CPU state
   to stderr, then abort().  Never returns. */
void cpu_abort(CPUState *env, const char *fmt, ...)
{
    va_list ap;

    va_start(ap, fmt);
    fprintf(stderr, "qemu: fatal: ");
    vfprintf(stderr, fmt, ap);
    fprintf(stderr, "\n");
#ifdef TARGET_I386
    /* include FPU and condition-code details on x86 */
    cpu_dump_state(env, stderr, fprintf, X86_DUMP_FPU | X86_DUMP_CCOP);
#else
    cpu_dump_state(env, stderr, fprintf, 0);
#endif
    va_end(ap);
    abort();
}
12247501267eSbellard 
12250124311eSbellard #if !defined(CONFIG_USER_ONLY)
12260124311eSbellard 
/* NOTE: if flush_global is true, also flush global entries (not
   implemented yet) */
void tlb_flush(CPUState *env, int flush_global)
{
    int i;

#if defined(DEBUG_TLB)
    printf("tlb_flush:\n");
#endif
    /* must reset current TB so that interrupts cannot modify the
       links while we are modifying them */
    env->current_tb = NULL;

    /* invalidate every entry of both TLBs; -1 never matches a real
       page address for read, write or code accesses */
    for(i = 0; i < CPU_TLB_SIZE; i++) {
        env->tlb_table[0][i].addr_read = -1;
        env->tlb_table[0][i].addr_write = -1;
        env->tlb_table[0][i].addr_code = -1;
        env->tlb_table[1][i].addr_read = -1;
        env->tlb_table[1][i].addr_write = -1;
        env->tlb_table[1][i].addr_code = -1;
    }

    /* the TB jump cache keys on virtual addresses: clear it entirely */
    memset (env->tb_jmp_cache, 0, TB_JMP_CACHE_SIZE * sizeof (void *));

#if !defined(CONFIG_SOFTMMU)
    munmap((void *)MMAP_AREA_START, MMAP_AREA_END - MMAP_AREA_START);
#endif
#ifdef USE_KQEMU
    if (env->kqemu_enabled) {
        kqemu_flush(env, flush_global);
    }
#endif
    tlb_flush_count++;
}
126133417e70Sbellard 
1262274da6b2Sbellard static inline void tlb_flush_entry(CPUTLBEntry *tlb_entry, target_ulong addr)
126361382a50Sbellard {
126484b7b8e7Sbellard     if (addr == (tlb_entry->addr_read &
126584b7b8e7Sbellard                  (TARGET_PAGE_MASK | TLB_INVALID_MASK)) ||
126684b7b8e7Sbellard         addr == (tlb_entry->addr_write &
126784b7b8e7Sbellard                  (TARGET_PAGE_MASK | TLB_INVALID_MASK)) ||
126884b7b8e7Sbellard         addr == (tlb_entry->addr_code &
126984b7b8e7Sbellard                  (TARGET_PAGE_MASK | TLB_INVALID_MASK))) {
127084b7b8e7Sbellard         tlb_entry->addr_read = -1;
127184b7b8e7Sbellard         tlb_entry->addr_write = -1;
127284b7b8e7Sbellard         tlb_entry->addr_code = -1;
127384b7b8e7Sbellard     }
127461382a50Sbellard }
127561382a50Sbellard 
/* flush the TLB entries and cached translation lookups for the single
   target page containing 'addr' */
void tlb_flush_page(CPUState *env, target_ulong addr)
{
    int i;
    TranslationBlock *tb;

#if defined(DEBUG_TLB)
    printf("tlb_flush_page: " TARGET_FMT_lx "\n", addr);
#endif
    /* must reset current TB so that interrupts cannot modify the
       links while we are modifying them */
    env->current_tb = NULL;

    addr &= TARGET_PAGE_MASK;
    i = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
    tlb_flush_entry(&env->tlb_table[0][i], addr);
    tlb_flush_entry(&env->tlb_table[1][i], addr);

    /* Discard jump cache entries for any tb which might potentially
       overlap the flushed page.  */
    i = tb_jmp_cache_hash_page(addr - TARGET_PAGE_SIZE);
    memset (&env->tb_jmp_cache[i], 0, TB_JMP_PAGE_SIZE * sizeof(tb));

    i = tb_jmp_cache_hash_page(addr);
    memset (&env->tb_jmp_cache[i], 0, TB_JMP_PAGE_SIZE * sizeof(tb));

#if !defined(CONFIG_SOFTMMU)
    if (addr < MMAP_AREA_END)
        munmap((void *)addr, TARGET_PAGE_SIZE);
#endif
#ifdef USE_KQEMU
    if (env->kqemu_enabled) {
        kqemu_flush_page(env, addr);
    }
#endif
}
13119fa3e853Sbellard 
/* update the TLBs so that writes to code in the virtual page 'addr'
   can be detected */
static void tlb_protect_code(ram_addr_t ram_addr)
{
    /* clearing CODE_DIRTY_FLAG forces subsequent writes to this page
       through the slow path, where self-modifying code is caught */
    cpu_physical_memory_reset_dirty(ram_addr,
                                    ram_addr + TARGET_PAGE_SIZE,
                                    CODE_DIRTY_FLAG);
}
13209fa3e853Sbellard 
13219fa3e853Sbellard /* update the TLB so that writes in physical page 'phys_addr' are no longer
13223a7d929eSbellard    tested for self modifying code */
13233a7d929eSbellard static void tlb_unprotect_code_phys(CPUState *env, ram_addr_t ram_addr,
13243a7d929eSbellard                                     target_ulong vaddr)
13259fa3e853Sbellard {
13263a7d929eSbellard     phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] |= CODE_DIRTY_FLAG;
13279fa3e853Sbellard }
13289fa3e853Sbellard 
13291ccde1cbSbellard static inline void tlb_reset_dirty_range(CPUTLBEntry *tlb_entry,
13301ccde1cbSbellard                                          unsigned long start, unsigned long length)
13311ccde1cbSbellard {
13321ccde1cbSbellard     unsigned long addr;
133384b7b8e7Sbellard     if ((tlb_entry->addr_write & ~TARGET_PAGE_MASK) == IO_MEM_RAM) {
133484b7b8e7Sbellard         addr = (tlb_entry->addr_write & TARGET_PAGE_MASK) + tlb_entry->addend;
13351ccde1cbSbellard         if ((addr - start) < length) {
133684b7b8e7Sbellard             tlb_entry->addr_write = (tlb_entry->addr_write & TARGET_PAGE_MASK) | IO_MEM_NOTDIRTY;
13371ccde1cbSbellard         }
13381ccde1cbSbellard     }
13391ccde1cbSbellard }
13401ccde1cbSbellard 
/* Clear the given 'dirty_flags' bits for every physical page in
   [start, end), then adjust every CPU's TLB so that the next write to
   the range goes through the dirty-tracking handlers again.  'start'
   is rounded down and 'end' rounded up to page boundaries. */
void cpu_physical_memory_reset_dirty(ram_addr_t start, ram_addr_t end,
                                     int dirty_flags)
{
    CPUState *env;
    unsigned long length, start1;
    int i, mask, len;
    uint8_t *p;

    start &= TARGET_PAGE_MASK;
    end = TARGET_PAGE_ALIGN(end);

    length = end - start;
    if (length == 0)
        return;
    len = length >> TARGET_PAGE_BITS;  /* number of pages in the range */
#ifdef USE_KQEMU
    /* XXX: should not depend on cpu context */
    env = first_cpu;
    if (env->kqemu_enabled) {
        /* tell the accelerator each page is no longer dirty */
        ram_addr_t addr;
        addr = start;
        for(i = 0; i < len; i++) {
            kqemu_set_notdirty(env, addr);
            addr += TARGET_PAGE_SIZE;
        }
    }
#endif
    /* clear the requested flag bits in the per-page dirty bitmap */
    mask = ~dirty_flags;
    p = phys_ram_dirty + (start >> TARGET_PAGE_BITS);
    for(i = 0; i < len; i++)
        p[i] &= mask;

    /* we modify the TLB cache so that the dirty bit will be set again
       when accessing the range */
    start1 = start + (unsigned long)phys_ram_base;  /* host address of range */
    for(env = first_cpu; env != NULL; env = env->next_cpu) {
        for(i = 0; i < CPU_TLB_SIZE; i++)
            tlb_reset_dirty_range(&env->tlb_table[0][i], start1, length);
        for(i = 0; i < CPU_TLB_SIZE; i++)
            tlb_reset_dirty_range(&env->tlb_table[1][i], start1, length);
    }

#if !defined(CONFIG_SOFTMMU)
    /* XXX: this is expensive */
    /* non-softmmu build: walk the virtual page map and write-protect any
       host mapping whose backing physical page is in the range, so the
       write is trapped via a host fault instead of a TLB miss */
    {
        VirtPageDesc *p;
        int j;
        target_ulong addr;

        for(i = 0; i < L1_SIZE; i++) {
            p = l1_virt_map[i];
            if (p) {
                addr = i << (TARGET_PAGE_BITS + L2_BITS);
                for(j = 0; j < L2_SIZE; j++) {
                    if (p->valid_tag == virt_valid_tag &&
                        p->phys_addr >= start && p->phys_addr < end &&
                        (p->prot & PROT_WRITE)) {
                        if (addr < MMAP_AREA_END) {
                            mprotect((void *)addr, TARGET_PAGE_SIZE,
                                     p->prot & ~PROT_WRITE);
                        }
                    }
                    addr += TARGET_PAGE_SIZE;
                    p++;
                }
            }
        }
    }
#endif
}
14111ccde1cbSbellard 
14123a7d929eSbellard static inline void tlb_update_dirty(CPUTLBEntry *tlb_entry)
14133a7d929eSbellard {
14143a7d929eSbellard     ram_addr_t ram_addr;
14153a7d929eSbellard 
141684b7b8e7Sbellard     if ((tlb_entry->addr_write & ~TARGET_PAGE_MASK) == IO_MEM_RAM) {
141784b7b8e7Sbellard         ram_addr = (tlb_entry->addr_write & TARGET_PAGE_MASK) +
14183a7d929eSbellard             tlb_entry->addend - (unsigned long)phys_ram_base;
14193a7d929eSbellard         if (!cpu_physical_memory_is_dirty(ram_addr)) {
142084b7b8e7Sbellard             tlb_entry->addr_write |= IO_MEM_NOTDIRTY;
14213a7d929eSbellard         }
14223a7d929eSbellard     }
14233a7d929eSbellard }
14243a7d929eSbellard 
14253a7d929eSbellard /* update the TLB according to the current state of the dirty bits */
14263a7d929eSbellard void cpu_tlb_update_dirty(CPUState *env)
14273a7d929eSbellard {
14283a7d929eSbellard     int i;
14293a7d929eSbellard     for(i = 0; i < CPU_TLB_SIZE; i++)
143084b7b8e7Sbellard         tlb_update_dirty(&env->tlb_table[0][i]);
14313a7d929eSbellard     for(i = 0; i < CPU_TLB_SIZE; i++)
143284b7b8e7Sbellard         tlb_update_dirty(&env->tlb_table[1][i]);
14333a7d929eSbellard }
14343a7d929eSbellard 
14351ccde1cbSbellard static inline void tlb_set_dirty1(CPUTLBEntry *tlb_entry,
14361ccde1cbSbellard                                   unsigned long start)
14371ccde1cbSbellard {
14381ccde1cbSbellard     unsigned long addr;
143984b7b8e7Sbellard     if ((tlb_entry->addr_write & ~TARGET_PAGE_MASK) == IO_MEM_NOTDIRTY) {
144084b7b8e7Sbellard         addr = (tlb_entry->addr_write & TARGET_PAGE_MASK) + tlb_entry->addend;
14411ccde1cbSbellard         if (addr == start) {
144284b7b8e7Sbellard             tlb_entry->addr_write = (tlb_entry->addr_write & TARGET_PAGE_MASK) | IO_MEM_RAM;
14431ccde1cbSbellard         }
14441ccde1cbSbellard     }
14451ccde1cbSbellard }
14461ccde1cbSbellard 
14471ccde1cbSbellard /* update the TLB corresponding to virtual page vaddr and phys addr
14481ccde1cbSbellard    addr so that it is no longer dirty */
14496a00d601Sbellard static inline void tlb_set_dirty(CPUState *env,
14506a00d601Sbellard                                  unsigned long addr, target_ulong vaddr)
14511ccde1cbSbellard {
14521ccde1cbSbellard     int i;
14531ccde1cbSbellard 
14541ccde1cbSbellard     addr &= TARGET_PAGE_MASK;
14551ccde1cbSbellard     i = (vaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
145684b7b8e7Sbellard     tlb_set_dirty1(&env->tlb_table[0][i], addr);
145784b7b8e7Sbellard     tlb_set_dirty1(&env->tlb_table[1][i], addr);
14581ccde1cbSbellard }
14591ccde1cbSbellard 
/* add a new TLB entry. At most one entry for a given virtual address
   is permitted. Return 0 if OK or 2 if the page could not be mapped
   (can only happen in non SOFTMMU mode for I/O pages or pages
   conflicting with the host address space). */
int tlb_set_page_exec(CPUState *env, target_ulong vaddr,
                      target_phys_addr_t paddr, int prot,
                      int is_user, int is_softmmu)
{
    PhysPageDesc *p;
    unsigned long pd;
    unsigned int index;
    target_ulong address;
    target_phys_addr_t addend;
    int ret;
    CPUTLBEntry *te;

    /* look up the physical page descriptor; unmapped physical
       addresses are treated as IO_MEM_UNASSIGNED */
    p = phys_page_find(paddr >> TARGET_PAGE_BITS);
    if (!p) {
        pd = IO_MEM_UNASSIGNED;
    } else {
        pd = p->phys_offset;
    }
#if defined(DEBUG_TLB)
    printf("tlb_set_page: vaddr=" TARGET_FMT_lx " paddr=0x%08x prot=%x u=%d smmu=%d pd=0x%08lx\n",
           vaddr, (int)paddr, prot, is_user, is_softmmu, pd);
#endif

    ret = 0;
#if !defined(CONFIG_SOFTMMU)
    if (is_softmmu)
#endif
    {
        /* softmmu path: fill in one CPUTLBEntry */
        if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM && !(pd & IO_MEM_ROMD)) {
            /* IO memory case */
            address = vaddr | pd;    /* low bits carry the I/O handler index */
            addend = paddr;
        } else {
            /* standard memory */
            address = vaddr;
            addend = (unsigned long)phys_ram_base + (pd & TARGET_PAGE_MASK);
        }

        index = (vaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
        addend -= vaddr;   /* stored so that host address = guest vaddr + addend */
        te = &env->tlb_table[is_user][index];
        te->addend = addend;
        if (prot & PAGE_READ) {
            te->addr_read = address;
        } else {
            te->addr_read = -1;   /* -1 never matches: forces a TLB refill */
        }
        if (prot & PAGE_EXEC) {
            te->addr_code = address;
        } else {
            te->addr_code = -1;
        }
        if (prot & PAGE_WRITE) {
            if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_ROM ||
                (pd & IO_MEM_ROMD)) {
                /* write access calls the I/O callback */
                te->addr_write = vaddr |
                    (pd & ~(TARGET_PAGE_MASK | IO_MEM_ROMD));
            } else if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_RAM &&
                       !cpu_physical_memory_is_dirty(pd)) {
                /* RAM page with dirty tracking pending: route writes
                   through the not-dirty handlers */
                te->addr_write = vaddr | IO_MEM_NOTDIRTY;
            } else {
                te->addr_write = address;
            }
        } else {
            te->addr_write = -1;
        }
    }
#if !defined(CONFIG_SOFTMMU)
    else {
        /* non-softmmu path: map the guest page directly into the host
           address space with mmap */
        if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM) {
            /* IO access: no mapping is done as it will be handled by the
               soft MMU */
            if (!(env->hflags & HF_SOFTMMU_MASK))
                ret = 2;
        } else {
            void *map_addr;

            if (vaddr >= MMAP_AREA_END) {
                ret = 2;
            } else {
                if (prot & PROT_WRITE) {
                    if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_ROM ||
#if defined(TARGET_HAS_SMC) || 1
                        first_tb ||
#endif
                        ((pd & ~TARGET_PAGE_MASK) == IO_MEM_RAM &&
                         !cpu_physical_memory_is_dirty(pd))) {
                        /* ROM: we do as if code was inside */
                        /* if code is present, we only map as read only and save the
                           original mapping */
                        VirtPageDesc *vp;

                        vp = virt_page_find_alloc(vaddr >> TARGET_PAGE_BITS, 1);
                        vp->phys_addr = pd;
                        vp->prot = prot;
                        vp->valid_tag = virt_valid_tag;
                        prot &= ~PAGE_WRITE;
                    }
                }
                map_addr = mmap((void *)vaddr, TARGET_PAGE_SIZE, prot,
                                MAP_SHARED | MAP_FIXED, phys_ram_fd, (pd & TARGET_PAGE_MASK));
                if (map_addr == MAP_FAILED) {
                    cpu_abort(env, "mmap failed when mapped physical address 0x%08x to virtual address 0x%08x\n",
                              paddr, vaddr);
                }
            }
        }
    }
#endif
    return ret;
}
15769fa3e853Sbellard 
/* called from signal handler: invalidate the code and unprotect the
   page. Return TRUE if the fault was succesfully handled.
   (softmmu build variant: only meaningful when the non-softmmu direct
   mapping is in use; with CONFIG_SOFTMMU it always returns 0.) */
int page_unprotect(target_ulong addr, unsigned long pc, void *puc)
{
#if !defined(CONFIG_SOFTMMU)
    VirtPageDesc *vp;

#if defined(DEBUG_TLB)
    printf("page_unprotect: addr=0x%08x\n", addr);
#endif
    addr &= TARGET_PAGE_MASK;

    /* if it is not mapped, no need to worry here */
    if (addr >= MMAP_AREA_END)
        return 0;
    vp = virt_page_find(addr >> TARGET_PAGE_BITS);
    if (!vp)
        return 0;
    /* NOTE: in this case, validate_tag is _not_ tested as it
       validates only the code TLB */
    if (vp->valid_tag != virt_valid_tag)
        return 0;
    /* only pages that were write-protected by us can be recovered */
    if (!(vp->prot & PAGE_WRITE))
        return 0;
#if defined(DEBUG_TLB)
    printf("page_unprotect: addr=0x%08x phys_addr=0x%08x prot=%x\n",
           addr, vp->phys_addr, vp->prot);
#endif
    /* restore the saved (writable) protection */
    if (mprotect((void *)addr, TARGET_PAGE_SIZE, vp->prot) < 0)
        cpu_abort(cpu_single_env, "error mprotect addr=0x%lx prot=%d\n",
                  (unsigned long)addr, vp->prot);
    /* set the dirty bit */
    phys_ram_dirty[vp->phys_addr >> TARGET_PAGE_BITS] = 0xff;
    /* flush the code inside */
    tb_invalidate_phys_page(vp->phys_addr, pc, puc);
    return 1;
#else
    return 0;
#endif
}
161733417e70Sbellard 
16180124311eSbellard #else
16190124311eSbellard 
/* User-mode emulation: there is no softmmu TLB to flush, so this is a
   no-op kept only to satisfy the common interface. */
void tlb_flush(CPUState *env, int flush_global)
{
}
16230124311eSbellard 
/* User-mode emulation: no per-page TLB state exists; no-op stub. */
void tlb_flush_page(CPUState *env, target_ulong addr)
{
}
16270124311eSbellard 
/* User-mode emulation: no TLB entries are installed; always reports
   success (0). */
int tlb_set_page_exec(CPUState *env, target_ulong vaddr,
                      target_phys_addr_t paddr, int prot,
                      int is_user, int is_softmmu)
{
    return 0;
}
163433417e70Sbellard 
/* dump memory mappings: walk the two-level page table and print each
   maximal run of pages sharing the same protection flags as one
   "start-end size prot" line to 'f'.  The extra i == L1_SIZE pass
   (with p == NULL) forces the final open region to be flushed. */
void page_dump(FILE *f)
{
    unsigned long start, end;
    int i, j, prot, prot1;
    PageDesc *p;

    fprintf(f, "%-8s %-8s %-8s %s\n",
            "start", "end", "size", "prot");
    start = -1;   /* -1 means "no region currently open" */
    end = -1;
    prot = 0;
    for(i = 0; i <= L1_SIZE; i++) {
        if (i < L1_SIZE)
            p = l1_map[i];
        else
            p = NULL;
        for(j = 0;j < L2_SIZE; j++) {
            if (!p)
                prot1 = 0;
            else
                prot1 = p[j].flags;
            if (prot1 != prot) {
                /* protection changed: close the current region (if any)
                   and possibly open a new one at 'end' */
                end = (i << (32 - L1_BITS)) | (j << TARGET_PAGE_BITS);
                if (start != -1) {
                    fprintf(f, "%08lx-%08lx %08lx %c%c%c\n",
                            start, end, end - start,
                            prot & PAGE_READ ? 'r' : '-',
                            prot & PAGE_WRITE ? 'w' : '-',
                            prot & PAGE_EXEC ? 'x' : '-');
                }
                if (prot1 != 0)
                    start = end;
                else
                    start = -1;
                prot = prot1;
            }
            if (!p)
                break;
        }
    }
}
16779fa3e853Sbellard 
167853a5960aSpbrook int page_get_flags(target_ulong address)
16799fa3e853Sbellard {
16809fa3e853Sbellard     PageDesc *p;
16819fa3e853Sbellard 
16829fa3e853Sbellard     p = page_find(address >> TARGET_PAGE_BITS);
16839fa3e853Sbellard     if (!p)
16849fa3e853Sbellard         return 0;
16859fa3e853Sbellard     return p->flags;
16869fa3e853Sbellard }
16879fa3e853Sbellard 
/* modify the flags of a page and invalidate the code if
   necessary. The flag PAGE_WRITE_ORG is positionned automatically
   depending on PAGE_WRITE.  The range [start, end) is rounded to page
   boundaries; descriptors are allocated on demand.  Runs under
   tb_lock because it may invalidate translated code. */
void page_set_flags(target_ulong start, target_ulong end, int flags)
{
    PageDesc *p;
    target_ulong addr;

    start = start & TARGET_PAGE_MASK;
    end = TARGET_PAGE_ALIGN(end);
    if (flags & PAGE_WRITE)
        flags |= PAGE_WRITE_ORG;   /* remember the page was writable originally */
    spin_lock(&tb_lock);
    for(addr = start; addr < end; addr += TARGET_PAGE_SIZE) {
        p = page_find_alloc(addr >> TARGET_PAGE_BITS);
        /* if the write protection is set, then we invalidate the code
           inside */
        if (!(p->flags & PAGE_WRITE) &&
            (flags & PAGE_WRITE) &&
            p->first_tb) {
            tb_invalidate_phys_page(addr, 0, NULL);
        }
        p->flags = flags;
    }
    spin_unlock(&tb_lock);
}
17149fa3e853Sbellard 
/* called from signal handler: invalidate the code and unprotect the
   page. Return TRUE if the fault was succesfully handled.
   (user-mode variant: works on whole host pages, which may span
   several target pages when qemu_host_page_size > TARGET_PAGE_SIZE) */
int page_unprotect(target_ulong address, unsigned long pc, void *puc)
{
    unsigned int page_index, prot, pindex;
    PageDesc *p, *p1;
    target_ulong host_start, host_end, addr;

    host_start = address & qemu_host_page_mask;
    page_index = host_start >> TARGET_PAGE_BITS;
    p1 = page_find(page_index);
    if (!p1)
        return 0;
    host_end = host_start + qemu_host_page_size;
    p = p1;
    prot = 0;
    /* accumulate the flags of every target page in the host page */
    for(addr = host_start;addr < host_end; addr += TARGET_PAGE_SIZE) {
        prot |= p->flags;
        p++;
    }
    /* if the page was really writable, then we change its
       protection back to writable */
    if (prot & PAGE_WRITE_ORG) {
        pindex = (address - host_start) >> TARGET_PAGE_BITS;
        if (!(p1[pindex].flags & PAGE_WRITE)) {
            mprotect((void *)g2h(host_start), qemu_host_page_size,
                     (prot & PAGE_BITS) | PAGE_WRITE);
            p1[pindex].flags |= PAGE_WRITE;
            /* and since the content will be modified, we must invalidate
               the corresponding translated code. */
            tb_invalidate_phys_page(address, pc, puc);
#ifdef DEBUG_TB_CHECK
            tb_invalidate_check(address);
#endif
            return 1;
        }
    }
    return 0;
}
17549fa3e853Sbellard 
17559fa3e853Sbellard /* call this function when system calls directly modify a memory area */
175653a5960aSpbrook /* ??? This should be redundant now we have lock_user.  */
175753a5960aSpbrook void page_unprotect_range(target_ulong data, target_ulong data_size)
17589fa3e853Sbellard {
175953a5960aSpbrook     target_ulong start, end, addr;
17609fa3e853Sbellard 
176153a5960aSpbrook     start = data;
17629fa3e853Sbellard     end = start + data_size;
17639fa3e853Sbellard     start &= TARGET_PAGE_MASK;
17649fa3e853Sbellard     end = TARGET_PAGE_ALIGN(end);
17659fa3e853Sbellard     for(addr = start; addr < end; addr += TARGET_PAGE_SIZE) {
1766d720b93dSbellard         page_unprotect(addr, 0, NULL);
17679fa3e853Sbellard     }
17689fa3e853Sbellard }
17699fa3e853Sbellard 
/* User-mode emulation: no TLB, so there is no not-dirty trap to
   remove; no-op stub. */
static inline void tlb_set_dirty(CPUState *env,
                                 unsigned long addr, target_ulong vaddr)
{
}
17749fa3e853Sbellard #endif /* defined(CONFIG_USER_ONLY) */
177533417e70Sbellard 
/* register physical memory. 'size' must be a multiple of the target
   page size. If (phys_offset & ~TARGET_PAGE_MASK) != 0, then it is an
   io memory page */
void cpu_register_physical_memory(target_phys_addr_t start_addr,
                                  unsigned long size,
                                  unsigned long phys_offset)
{
    target_phys_addr_t addr, end_addr;
    PhysPageDesc *p;
    CPUState *env;

    size = (size + TARGET_PAGE_SIZE - 1) & TARGET_PAGE_MASK;  /* round up to a whole page */
    end_addr = start_addr + size;
    for(addr = start_addr; addr != end_addr; addr += TARGET_PAGE_SIZE) {
        p = phys_page_find_alloc(addr >> TARGET_PAGE_BITS, 1);
        p->phys_offset = phys_offset;
        /* only RAM/ROM-backed pages advance the offset; every page of a
           pure I/O region keeps the same handler value */
        if ((phys_offset & ~TARGET_PAGE_MASK) <= IO_MEM_ROM ||
            (phys_offset & IO_MEM_ROMD))
            phys_offset += TARGET_PAGE_SIZE;
    }

    /* since each CPU stores ram addresses in its TLB cache, we must
       reset the modified entries */
    /* XXX: slow ! */
    for(env = first_cpu; env != NULL; env = env->next_cpu) {
        tlb_flush(env, 1);
    }
}
180433417e70Sbellard 
1805ba863458Sbellard /* XXX: temporary until new memory mapping API */
1806ba863458Sbellard uint32_t cpu_get_physical_page_desc(target_phys_addr_t addr)
1807ba863458Sbellard {
1808ba863458Sbellard     PhysPageDesc *p;
1809ba863458Sbellard 
1810ba863458Sbellard     p = phys_page_find(addr >> TARGET_PAGE_BITS);
1811ba863458Sbellard     if (!p)
1812ba863458Sbellard         return IO_MEM_UNASSIGNED;
1813ba863458Sbellard     return p->phys_offset;
1814ba863458Sbellard }
1815ba863458Sbellard 
1816e9a1ab19Sbellard /* XXX: better than nothing */
1817e9a1ab19Sbellard ram_addr_t qemu_ram_alloc(unsigned int size)
1818e9a1ab19Sbellard {
1819e9a1ab19Sbellard     ram_addr_t addr;
1820e9a1ab19Sbellard     if ((phys_ram_alloc_offset + size) >= phys_ram_size) {
1821e9a1ab19Sbellard         fprintf(stderr, "Not enough memory (requested_size = %u, max memory = %d)\n",
1822e9a1ab19Sbellard                 size, phys_ram_size);
1823e9a1ab19Sbellard         abort();
1824e9a1ab19Sbellard     }
1825e9a1ab19Sbellard     addr = phys_ram_alloc_offset;
1826e9a1ab19Sbellard     phys_ram_alloc_offset = TARGET_PAGE_ALIGN(phys_ram_alloc_offset + size);
1827e9a1ab19Sbellard     return addr;
1828e9a1ab19Sbellard }
1829e9a1ab19Sbellard 
/* Intentional no-op: qemu_ram_alloc is a simple bump allocator with no
   free list, so individual allocations cannot be returned. */
void qemu_ram_free(ram_addr_t addr)
{
}
1833e9a1ab19Sbellard 
/* Read handler for unassigned physical memory: optionally logs the
   access, always returns 0. */
static uint32_t unassigned_mem_readb(void *opaque, target_phys_addr_t addr)
{
#ifdef DEBUG_UNASSIGNED
    printf("Unassigned mem read  0x%08x\n", (int)addr);
#endif
    return 0;
}
184133417e70Sbellard 
/* Write handler for unassigned physical memory: the store is silently
   discarded (optionally logged). */
static void unassigned_mem_writeb(void *opaque, target_phys_addr_t addr, uint32_t val)
{
#ifdef DEBUG_UNASSIGNED
    printf("Unassigned mem write 0x%08x = 0x%x\n", (int)addr, val);
#endif
}
184833417e70Sbellard 
/* Dispatch table for byte/word/long reads of unassigned memory; the
   same byte handler serves all three sizes since it just returns 0. */
static CPUReadMemoryFunc *unassigned_mem_read[3] = {
    unassigned_mem_readb,
    unassigned_mem_readb,
    unassigned_mem_readb,
};
185433417e70Sbellard 
/* Dispatch table for byte/word/long writes to unassigned memory; the
   byte handler discards the value, so it serves all three sizes. */
static CPUWriteMemoryFunc *unassigned_mem_write[3] = {
    unassigned_mem_writeb,
    unassigned_mem_writeb,
    unassigned_mem_writeb,
};
186033417e70Sbellard 
/* Byte store handler for RAM pages routed through IO_MEM_NOTDIRTY:
   invalidates any translated code on the page, performs the store,
   then marks the page dirty (and restores the fast write path once
   no code remains on the page). */
static void notdirty_mem_writeb(void *opaque, target_phys_addr_t addr, uint32_t val)
{
    unsigned long ram_addr;
    int dirty_flags;
    /* here 'addr' is a host address inside phys_ram_base; convert it
       back to a ram offset to index the dirty bitmap */
    ram_addr = addr - (unsigned long)phys_ram_base;
    dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
    if (!(dirty_flags & CODE_DIRTY_FLAG)) {
#if !defined(CONFIG_USER_ONLY)
        /* translated code may live on this page: invalidate it first */
        tb_invalidate_phys_page_fast(ram_addr, 1);
        dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
#endif
    }
    stb_p((uint8_t *)(long)addr, val);
#ifdef USE_KQEMU
    if (cpu_single_env->kqemu_enabled &&
        (dirty_flags & KQEMU_MODIFY_PAGE_MASK) != KQEMU_MODIFY_PAGE_MASK)
        kqemu_modify_page(cpu_single_env, ram_addr);
#endif
    dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
    phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] = dirty_flags;
    /* we remove the notdirty callback only if the code has been
       flushed */
    if (dirty_flags == 0xff)
        tlb_set_dirty(cpu_single_env, addr, cpu_single_env->mem_write_vaddr);
}
18861ccde1cbSbellard 
/* 16-bit variant of notdirty_mem_writeb: invalidate code on the page,
   perform the store, update the dirty bitmap. */
static void notdirty_mem_writew(void *opaque, target_phys_addr_t addr, uint32_t val)
{
    unsigned long ram_addr;
    int dirty_flags;
    /* convert the host address back to a ram offset */
    ram_addr = addr - (unsigned long)phys_ram_base;
    dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
    if (!(dirty_flags & CODE_DIRTY_FLAG)) {
#if !defined(CONFIG_USER_ONLY)
        tb_invalidate_phys_page_fast(ram_addr, 2);   /* 2-byte access */
        dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
#endif
    }
    stw_p((uint8_t *)(long)addr, val);
#ifdef USE_KQEMU
    if (cpu_single_env->kqemu_enabled &&
        (dirty_flags & KQEMU_MODIFY_PAGE_MASK) != KQEMU_MODIFY_PAGE_MASK)
        kqemu_modify_page(cpu_single_env, ram_addr);
#endif
    dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
    phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] = dirty_flags;
    /* we remove the notdirty callback only if the code has been
       flushed */
    if (dirty_flags == 0xff)
        tlb_set_dirty(cpu_single_env, addr, cpu_single_env->mem_write_vaddr);
}
19121ccde1cbSbellard 
/* 32-bit variant of notdirty_mem_writeb: invalidate code on the page,
   perform the store, update the dirty bitmap. */
static void notdirty_mem_writel(void *opaque, target_phys_addr_t addr, uint32_t val)
{
    unsigned long ram_addr;
    int dirty_flags;
    /* convert the host address back to a ram offset */
    ram_addr = addr - (unsigned long)phys_ram_base;
    dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
    if (!(dirty_flags & CODE_DIRTY_FLAG)) {
#if !defined(CONFIG_USER_ONLY)
        tb_invalidate_phys_page_fast(ram_addr, 4);   /* 4-byte access */
        dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
#endif
    }
    stl_p((uint8_t *)(long)addr, val);
#ifdef USE_KQEMU
    if (cpu_single_env->kqemu_enabled &&
        (dirty_flags & KQEMU_MODIFY_PAGE_MASK) != KQEMU_MODIFY_PAGE_MASK)
        kqemu_modify_page(cpu_single_env, ram_addr);
#endif
    dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
    phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] = dirty_flags;
    /* we remove the notdirty callback only if the code has been
       flushed */
    if (dirty_flags == 0xff)
        tlb_set_dirty(cpu_single_env, addr, cpu_single_env->mem_write_vaddr);
}
19381ccde1cbSbellard 
19393a7d929eSbellard static CPUReadMemoryFunc *error_mem_read[3] = {
19403a7d929eSbellard     NULL, /* never used */
19413a7d929eSbellard     NULL, /* never used */
19423a7d929eSbellard     NULL, /* never used */
19433a7d929eSbellard };
19443a7d929eSbellard 
19451ccde1cbSbellard static CPUWriteMemoryFunc *notdirty_mem_write[3] = {
19461ccde1cbSbellard     notdirty_mem_writeb,
19471ccde1cbSbellard     notdirty_mem_writew,
19481ccde1cbSbellard     notdirty_mem_writel,
19491ccde1cbSbellard };
19501ccde1cbSbellard 
195133417e70Sbellard static void io_mem_init(void)
195233417e70Sbellard {
19533a7d929eSbellard     cpu_register_io_memory(IO_MEM_ROM >> IO_MEM_SHIFT, error_mem_read, unassigned_mem_write, NULL);
1954a4193c8aSbellard     cpu_register_io_memory(IO_MEM_UNASSIGNED >> IO_MEM_SHIFT, unassigned_mem_read, unassigned_mem_write, NULL);
19553a7d929eSbellard     cpu_register_io_memory(IO_MEM_NOTDIRTY >> IO_MEM_SHIFT, error_mem_read, notdirty_mem_write, NULL);
19561ccde1cbSbellard     io_mem_nb = 5;
19571ccde1cbSbellard 
19581ccde1cbSbellard     /* alloc dirty bits array */
19590a962c02Sbellard     phys_ram_dirty = qemu_vmalloc(phys_ram_size >> TARGET_PAGE_BITS);
19603a7d929eSbellard     memset(phys_ram_dirty, 0xff, phys_ram_size >> TARGET_PAGE_BITS);
196133417e70Sbellard }
196233417e70Sbellard 
196333417e70Sbellard /* mem_read and mem_write are arrays of functions containing the
196433417e70Sbellard    function to access byte (index 0), word (index 1) and dword (index
196533417e70Sbellard    2). All functions must be supplied. If io_index is non zero, the
196633417e70Sbellard    corresponding io zone is modified. If it is zero, a new io zone is
196733417e70Sbellard    allocated. The return value can be used with
196833417e70Sbellard    cpu_register_physical_memory(). (-1) is returned if error. */
196933417e70Sbellard int cpu_register_io_memory(int io_index,
197033417e70Sbellard                            CPUReadMemoryFunc **mem_read,
1971a4193c8aSbellard                            CPUWriteMemoryFunc **mem_write,
1972a4193c8aSbellard                            void *opaque)
197333417e70Sbellard {
197433417e70Sbellard     int i;
197533417e70Sbellard 
197633417e70Sbellard     if (io_index <= 0) {
1977b5ff1b31Sbellard         if (io_mem_nb >= IO_MEM_NB_ENTRIES)
197833417e70Sbellard             return -1;
197933417e70Sbellard         io_index = io_mem_nb++;
198033417e70Sbellard     } else {
198133417e70Sbellard         if (io_index >= IO_MEM_NB_ENTRIES)
198233417e70Sbellard             return -1;
198333417e70Sbellard     }
198433417e70Sbellard 
198533417e70Sbellard     for(i = 0;i < 3; i++) {
198633417e70Sbellard         io_mem_read[io_index][i] = mem_read[i];
198733417e70Sbellard         io_mem_write[io_index][i] = mem_write[i];
198833417e70Sbellard     }
1989a4193c8aSbellard     io_mem_opaque[io_index] = opaque;
199033417e70Sbellard     return io_index << IO_MEM_SHIFT;
199133417e70Sbellard }
199261382a50Sbellard 
19938926b517Sbellard CPUWriteMemoryFunc **cpu_get_io_memory_write(int io_index)
19948926b517Sbellard {
19958926b517Sbellard     return io_mem_write[io_index >> IO_MEM_SHIFT];
19968926b517Sbellard }
19978926b517Sbellard 
19988926b517Sbellard CPUReadMemoryFunc **cpu_get_io_memory_read(int io_index)
19998926b517Sbellard {
20008926b517Sbellard     return io_mem_read[io_index >> IO_MEM_SHIFT];
20018926b517Sbellard }
20028926b517Sbellard 
200313eb76e0Sbellard /* physical memory access (slow version, mainly for debug) */
200413eb76e0Sbellard #if defined(CONFIG_USER_ONLY)
20052e12669aSbellard void cpu_physical_memory_rw(target_phys_addr_t addr, uint8_t *buf,
200613eb76e0Sbellard                             int len, int is_write)
200713eb76e0Sbellard {
200813eb76e0Sbellard     int l, flags;
200913eb76e0Sbellard     target_ulong page;
201053a5960aSpbrook     void * p;
201113eb76e0Sbellard 
201213eb76e0Sbellard     while (len > 0) {
201313eb76e0Sbellard         page = addr & TARGET_PAGE_MASK;
201413eb76e0Sbellard         l = (page + TARGET_PAGE_SIZE) - addr;
201513eb76e0Sbellard         if (l > len)
201613eb76e0Sbellard             l = len;
201713eb76e0Sbellard         flags = page_get_flags(page);
201813eb76e0Sbellard         if (!(flags & PAGE_VALID))
201913eb76e0Sbellard             return;
202013eb76e0Sbellard         if (is_write) {
202113eb76e0Sbellard             if (!(flags & PAGE_WRITE))
202213eb76e0Sbellard                 return;
202353a5960aSpbrook             p = lock_user(addr, len, 0);
202453a5960aSpbrook             memcpy(p, buf, len);
202553a5960aSpbrook             unlock_user(p, addr, len);
202613eb76e0Sbellard         } else {
202713eb76e0Sbellard             if (!(flags & PAGE_READ))
202813eb76e0Sbellard                 return;
202953a5960aSpbrook             p = lock_user(addr, len, 1);
203053a5960aSpbrook             memcpy(buf, p, len);
203153a5960aSpbrook             unlock_user(p, addr, 0);
203213eb76e0Sbellard         }
203313eb76e0Sbellard         len -= l;
203413eb76e0Sbellard         buf += l;
203513eb76e0Sbellard         addr += l;
203613eb76e0Sbellard     }
203713eb76e0Sbellard }
20388df1cd07Sbellard 
203913eb76e0Sbellard #else
20402e12669aSbellard void cpu_physical_memory_rw(target_phys_addr_t addr, uint8_t *buf,
204113eb76e0Sbellard                             int len, int is_write)
204213eb76e0Sbellard {
204313eb76e0Sbellard     int l, io_index;
204413eb76e0Sbellard     uint8_t *ptr;
204513eb76e0Sbellard     uint32_t val;
20462e12669aSbellard     target_phys_addr_t page;
20472e12669aSbellard     unsigned long pd;
204892e873b9Sbellard     PhysPageDesc *p;
204913eb76e0Sbellard 
205013eb76e0Sbellard     while (len > 0) {
205113eb76e0Sbellard         page = addr & TARGET_PAGE_MASK;
205213eb76e0Sbellard         l = (page + TARGET_PAGE_SIZE) - addr;
205313eb76e0Sbellard         if (l > len)
205413eb76e0Sbellard             l = len;
205592e873b9Sbellard         p = phys_page_find(page >> TARGET_PAGE_BITS);
205613eb76e0Sbellard         if (!p) {
205713eb76e0Sbellard             pd = IO_MEM_UNASSIGNED;
205813eb76e0Sbellard         } else {
205913eb76e0Sbellard             pd = p->phys_offset;
206013eb76e0Sbellard         }
206113eb76e0Sbellard 
206213eb76e0Sbellard         if (is_write) {
20633a7d929eSbellard             if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
206413eb76e0Sbellard                 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
20656a00d601Sbellard                 /* XXX: could force cpu_single_env to NULL to avoid
20666a00d601Sbellard                    potential bugs */
206713eb76e0Sbellard                 if (l >= 4 && ((addr & 3) == 0)) {
20681c213d19Sbellard                     /* 32 bit write access */
2069c27004ecSbellard                     val = ldl_p(buf);
2070a4193c8aSbellard                     io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
207113eb76e0Sbellard                     l = 4;
207213eb76e0Sbellard                 } else if (l >= 2 && ((addr & 1) == 0)) {
20731c213d19Sbellard                     /* 16 bit write access */
2074c27004ecSbellard                     val = lduw_p(buf);
2075a4193c8aSbellard                     io_mem_write[io_index][1](io_mem_opaque[io_index], addr, val);
207613eb76e0Sbellard                     l = 2;
207713eb76e0Sbellard                 } else {
20781c213d19Sbellard                     /* 8 bit write access */
2079c27004ecSbellard                     val = ldub_p(buf);
2080a4193c8aSbellard                     io_mem_write[io_index][0](io_mem_opaque[io_index], addr, val);
208113eb76e0Sbellard                     l = 1;
208213eb76e0Sbellard                 }
208313eb76e0Sbellard             } else {
2084b448f2f3Sbellard                 unsigned long addr1;
2085b448f2f3Sbellard                 addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
208613eb76e0Sbellard                 /* RAM case */
2087b448f2f3Sbellard                 ptr = phys_ram_base + addr1;
208813eb76e0Sbellard                 memcpy(ptr, buf, l);
20893a7d929eSbellard                 if (!cpu_physical_memory_is_dirty(addr1)) {
2090b448f2f3Sbellard                     /* invalidate code */
2091b448f2f3Sbellard                     tb_invalidate_phys_page_range(addr1, addr1 + l, 0);
2092b448f2f3Sbellard                     /* set dirty bit */
2093f23db169Sbellard                     phys_ram_dirty[addr1 >> TARGET_PAGE_BITS] |=
2094f23db169Sbellard                         (0xff & ~CODE_DIRTY_FLAG);
209513eb76e0Sbellard                 }
20963a7d929eSbellard             }
209713eb76e0Sbellard         } else {
20982a4188a3Sbellard             if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
20992a4188a3Sbellard                 !(pd & IO_MEM_ROMD)) {
210013eb76e0Sbellard                 /* I/O case */
210113eb76e0Sbellard                 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
210213eb76e0Sbellard                 if (l >= 4 && ((addr & 3) == 0)) {
210313eb76e0Sbellard                     /* 32 bit read access */
2104a4193c8aSbellard                     val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr);
2105c27004ecSbellard                     stl_p(buf, val);
210613eb76e0Sbellard                     l = 4;
210713eb76e0Sbellard                 } else if (l >= 2 && ((addr & 1) == 0)) {
210813eb76e0Sbellard                     /* 16 bit read access */
2109a4193c8aSbellard                     val = io_mem_read[io_index][1](io_mem_opaque[io_index], addr);
2110c27004ecSbellard                     stw_p(buf, val);
211113eb76e0Sbellard                     l = 2;
211213eb76e0Sbellard                 } else {
21131c213d19Sbellard                     /* 8 bit read access */
2114a4193c8aSbellard                     val = io_mem_read[io_index][0](io_mem_opaque[io_index], addr);
2115c27004ecSbellard                     stb_p(buf, val);
211613eb76e0Sbellard                     l = 1;
211713eb76e0Sbellard                 }
211813eb76e0Sbellard             } else {
211913eb76e0Sbellard                 /* RAM case */
212013eb76e0Sbellard                 ptr = phys_ram_base + (pd & TARGET_PAGE_MASK) +
212113eb76e0Sbellard                     (addr & ~TARGET_PAGE_MASK);
212213eb76e0Sbellard                 memcpy(buf, ptr, l);
212313eb76e0Sbellard             }
212413eb76e0Sbellard         }
212513eb76e0Sbellard         len -= l;
212613eb76e0Sbellard         buf += l;
212713eb76e0Sbellard         addr += l;
212813eb76e0Sbellard     }
212913eb76e0Sbellard }
21308df1cd07Sbellard 
2131d0ecd2aaSbellard /* used for ROM loading : can write in RAM and ROM */
2132d0ecd2aaSbellard void cpu_physical_memory_write_rom(target_phys_addr_t addr,
2133d0ecd2aaSbellard                                    const uint8_t *buf, int len)
2134d0ecd2aaSbellard {
2135d0ecd2aaSbellard     int l;
2136d0ecd2aaSbellard     uint8_t *ptr;
2137d0ecd2aaSbellard     target_phys_addr_t page;
2138d0ecd2aaSbellard     unsigned long pd;
2139d0ecd2aaSbellard     PhysPageDesc *p;
2140d0ecd2aaSbellard 
2141d0ecd2aaSbellard     while (len > 0) {
2142d0ecd2aaSbellard         page = addr & TARGET_PAGE_MASK;
2143d0ecd2aaSbellard         l = (page + TARGET_PAGE_SIZE) - addr;
2144d0ecd2aaSbellard         if (l > len)
2145d0ecd2aaSbellard             l = len;
2146d0ecd2aaSbellard         p = phys_page_find(page >> TARGET_PAGE_BITS);
2147d0ecd2aaSbellard         if (!p) {
2148d0ecd2aaSbellard             pd = IO_MEM_UNASSIGNED;
2149d0ecd2aaSbellard         } else {
2150d0ecd2aaSbellard             pd = p->phys_offset;
2151d0ecd2aaSbellard         }
2152d0ecd2aaSbellard 
2153d0ecd2aaSbellard         if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM &&
21542a4188a3Sbellard             (pd & ~TARGET_PAGE_MASK) != IO_MEM_ROM &&
21552a4188a3Sbellard             !(pd & IO_MEM_ROMD)) {
2156d0ecd2aaSbellard             /* do nothing */
2157d0ecd2aaSbellard         } else {
2158d0ecd2aaSbellard             unsigned long addr1;
2159d0ecd2aaSbellard             addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
2160d0ecd2aaSbellard             /* ROM/RAM case */
2161d0ecd2aaSbellard             ptr = phys_ram_base + addr1;
2162d0ecd2aaSbellard             memcpy(ptr, buf, l);
2163d0ecd2aaSbellard         }
2164d0ecd2aaSbellard         len -= l;
2165d0ecd2aaSbellard         buf += l;
2166d0ecd2aaSbellard         addr += l;
2167d0ecd2aaSbellard     }
2168d0ecd2aaSbellard }
2169d0ecd2aaSbellard 
2170d0ecd2aaSbellard 
21718df1cd07Sbellard /* warning: addr must be aligned */
21728df1cd07Sbellard uint32_t ldl_phys(target_phys_addr_t addr)
21738df1cd07Sbellard {
21748df1cd07Sbellard     int io_index;
21758df1cd07Sbellard     uint8_t *ptr;
21768df1cd07Sbellard     uint32_t val;
21778df1cd07Sbellard     unsigned long pd;
21788df1cd07Sbellard     PhysPageDesc *p;
21798df1cd07Sbellard 
21808df1cd07Sbellard     p = phys_page_find(addr >> TARGET_PAGE_BITS);
21818df1cd07Sbellard     if (!p) {
21828df1cd07Sbellard         pd = IO_MEM_UNASSIGNED;
21838df1cd07Sbellard     } else {
21848df1cd07Sbellard         pd = p->phys_offset;
21858df1cd07Sbellard     }
21868df1cd07Sbellard 
21872a4188a3Sbellard     if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
21882a4188a3Sbellard         !(pd & IO_MEM_ROMD)) {
21898df1cd07Sbellard         /* I/O case */
21908df1cd07Sbellard         io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
21918df1cd07Sbellard         val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr);
21928df1cd07Sbellard     } else {
21938df1cd07Sbellard         /* RAM case */
21948df1cd07Sbellard         ptr = phys_ram_base + (pd & TARGET_PAGE_MASK) +
21958df1cd07Sbellard             (addr & ~TARGET_PAGE_MASK);
21968df1cd07Sbellard         val = ldl_p(ptr);
21978df1cd07Sbellard     }
21988df1cd07Sbellard     return val;
21998df1cd07Sbellard }
22008df1cd07Sbellard 
220184b7b8e7Sbellard /* warning: addr must be aligned */
220284b7b8e7Sbellard uint64_t ldq_phys(target_phys_addr_t addr)
220384b7b8e7Sbellard {
220484b7b8e7Sbellard     int io_index;
220584b7b8e7Sbellard     uint8_t *ptr;
220684b7b8e7Sbellard     uint64_t val;
220784b7b8e7Sbellard     unsigned long pd;
220884b7b8e7Sbellard     PhysPageDesc *p;
220984b7b8e7Sbellard 
221084b7b8e7Sbellard     p = phys_page_find(addr >> TARGET_PAGE_BITS);
221184b7b8e7Sbellard     if (!p) {
221284b7b8e7Sbellard         pd = IO_MEM_UNASSIGNED;
221384b7b8e7Sbellard     } else {
221484b7b8e7Sbellard         pd = p->phys_offset;
221584b7b8e7Sbellard     }
221684b7b8e7Sbellard 
22172a4188a3Sbellard     if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
22182a4188a3Sbellard         !(pd & IO_MEM_ROMD)) {
221984b7b8e7Sbellard         /* I/O case */
222084b7b8e7Sbellard         io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
222184b7b8e7Sbellard #ifdef TARGET_WORDS_BIGENDIAN
222284b7b8e7Sbellard         val = (uint64_t)io_mem_read[io_index][2](io_mem_opaque[io_index], addr) << 32;
222384b7b8e7Sbellard         val |= io_mem_read[io_index][2](io_mem_opaque[io_index], addr + 4);
222484b7b8e7Sbellard #else
222584b7b8e7Sbellard         val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr);
222684b7b8e7Sbellard         val |= (uint64_t)io_mem_read[io_index][2](io_mem_opaque[io_index], addr + 4) << 32;
222784b7b8e7Sbellard #endif
222884b7b8e7Sbellard     } else {
222984b7b8e7Sbellard         /* RAM case */
223084b7b8e7Sbellard         ptr = phys_ram_base + (pd & TARGET_PAGE_MASK) +
223184b7b8e7Sbellard             (addr & ~TARGET_PAGE_MASK);
223284b7b8e7Sbellard         val = ldq_p(ptr);
223384b7b8e7Sbellard     }
223484b7b8e7Sbellard     return val;
223584b7b8e7Sbellard }
223684b7b8e7Sbellard 
2237aab33094Sbellard /* XXX: optimize */
2238aab33094Sbellard uint32_t ldub_phys(target_phys_addr_t addr)
2239aab33094Sbellard {
2240aab33094Sbellard     uint8_t val;
2241aab33094Sbellard     cpu_physical_memory_read(addr, &val, 1);
2242aab33094Sbellard     return val;
2243aab33094Sbellard }
2244aab33094Sbellard 
2245aab33094Sbellard /* XXX: optimize */
2246aab33094Sbellard uint32_t lduw_phys(target_phys_addr_t addr)
2247aab33094Sbellard {
2248aab33094Sbellard     uint16_t val;
2249aab33094Sbellard     cpu_physical_memory_read(addr, (uint8_t *)&val, 2);
2250aab33094Sbellard     return tswap16(val);
2251aab33094Sbellard }
2252aab33094Sbellard 
22538df1cd07Sbellard /* warning: addr must be aligned. The ram page is not masked as dirty
22548df1cd07Sbellard    and the code inside is not invalidated. It is useful if the dirty
22558df1cd07Sbellard    bits are used to track modified PTEs */
22568df1cd07Sbellard void stl_phys_notdirty(target_phys_addr_t addr, uint32_t val)
22578df1cd07Sbellard {
22588df1cd07Sbellard     int io_index;
22598df1cd07Sbellard     uint8_t *ptr;
22608df1cd07Sbellard     unsigned long pd;
22618df1cd07Sbellard     PhysPageDesc *p;
22628df1cd07Sbellard 
22638df1cd07Sbellard     p = phys_page_find(addr >> TARGET_PAGE_BITS);
22648df1cd07Sbellard     if (!p) {
22658df1cd07Sbellard         pd = IO_MEM_UNASSIGNED;
22668df1cd07Sbellard     } else {
22678df1cd07Sbellard         pd = p->phys_offset;
22688df1cd07Sbellard     }
22698df1cd07Sbellard 
22703a7d929eSbellard     if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
22718df1cd07Sbellard         io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
22728df1cd07Sbellard         io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
22738df1cd07Sbellard     } else {
22748df1cd07Sbellard         ptr = phys_ram_base + (pd & TARGET_PAGE_MASK) +
22758df1cd07Sbellard             (addr & ~TARGET_PAGE_MASK);
22768df1cd07Sbellard         stl_p(ptr, val);
22778df1cd07Sbellard     }
22788df1cd07Sbellard }
22798df1cd07Sbellard 
22808df1cd07Sbellard /* warning: addr must be aligned */
22818df1cd07Sbellard void stl_phys(target_phys_addr_t addr, uint32_t val)
22828df1cd07Sbellard {
22838df1cd07Sbellard     int io_index;
22848df1cd07Sbellard     uint8_t *ptr;
22858df1cd07Sbellard     unsigned long pd;
22868df1cd07Sbellard     PhysPageDesc *p;
22878df1cd07Sbellard 
22888df1cd07Sbellard     p = phys_page_find(addr >> TARGET_PAGE_BITS);
22898df1cd07Sbellard     if (!p) {
22908df1cd07Sbellard         pd = IO_MEM_UNASSIGNED;
22918df1cd07Sbellard     } else {
22928df1cd07Sbellard         pd = p->phys_offset;
22938df1cd07Sbellard     }
22948df1cd07Sbellard 
22953a7d929eSbellard     if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
22968df1cd07Sbellard         io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
22978df1cd07Sbellard         io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
22988df1cd07Sbellard     } else {
22998df1cd07Sbellard         unsigned long addr1;
23008df1cd07Sbellard         addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
23018df1cd07Sbellard         /* RAM case */
23028df1cd07Sbellard         ptr = phys_ram_base + addr1;
23038df1cd07Sbellard         stl_p(ptr, val);
23043a7d929eSbellard         if (!cpu_physical_memory_is_dirty(addr1)) {
23058df1cd07Sbellard             /* invalidate code */
23068df1cd07Sbellard             tb_invalidate_phys_page_range(addr1, addr1 + 4, 0);
23078df1cd07Sbellard             /* set dirty bit */
2308f23db169Sbellard             phys_ram_dirty[addr1 >> TARGET_PAGE_BITS] |=
2309f23db169Sbellard                 (0xff & ~CODE_DIRTY_FLAG);
23108df1cd07Sbellard         }
23118df1cd07Sbellard     }
23123a7d929eSbellard }
23138df1cd07Sbellard 
2314aab33094Sbellard /* XXX: optimize */
2315aab33094Sbellard void stb_phys(target_phys_addr_t addr, uint32_t val)
2316aab33094Sbellard {
2317aab33094Sbellard     uint8_t v = val;
2318aab33094Sbellard     cpu_physical_memory_write(addr, &v, 1);
2319aab33094Sbellard }
2320aab33094Sbellard 
2321aab33094Sbellard /* XXX: optimize */
2322aab33094Sbellard void stw_phys(target_phys_addr_t addr, uint32_t val)
2323aab33094Sbellard {
2324aab33094Sbellard     uint16_t v = tswap16(val);
2325aab33094Sbellard     cpu_physical_memory_write(addr, (const uint8_t *)&v, 2);
2326aab33094Sbellard }
2327aab33094Sbellard 
2328aab33094Sbellard /* XXX: optimize */
2329aab33094Sbellard void stq_phys(target_phys_addr_t addr, uint64_t val)
2330aab33094Sbellard {
2331aab33094Sbellard     val = tswap64(val);
2332aab33094Sbellard     cpu_physical_memory_write(addr, (const uint8_t *)&val, 8);
2333aab33094Sbellard }
2334aab33094Sbellard 
233513eb76e0Sbellard #endif
233613eb76e0Sbellard 
233713eb76e0Sbellard /* virtual memory access for debug */
2338b448f2f3Sbellard int cpu_memory_rw_debug(CPUState *env, target_ulong addr,
2339b448f2f3Sbellard                         uint8_t *buf, int len, int is_write)
234013eb76e0Sbellard {
234113eb76e0Sbellard     int l;
234213eb76e0Sbellard     target_ulong page, phys_addr;
234313eb76e0Sbellard 
234413eb76e0Sbellard     while (len > 0) {
234513eb76e0Sbellard         page = addr & TARGET_PAGE_MASK;
234613eb76e0Sbellard         phys_addr = cpu_get_phys_page_debug(env, page);
234713eb76e0Sbellard         /* if no physical page mapped, return an error */
234813eb76e0Sbellard         if (phys_addr == -1)
234913eb76e0Sbellard             return -1;
235013eb76e0Sbellard         l = (page + TARGET_PAGE_SIZE) - addr;
235113eb76e0Sbellard         if (l > len)
235213eb76e0Sbellard             l = len;
2353b448f2f3Sbellard         cpu_physical_memory_rw(phys_addr + (addr & ~TARGET_PAGE_MASK),
2354b448f2f3Sbellard                                buf, l, is_write);
235513eb76e0Sbellard         len -= l;
235613eb76e0Sbellard         buf += l;
235713eb76e0Sbellard         addr += l;
235813eb76e0Sbellard     }
235913eb76e0Sbellard     return 0;
236013eb76e0Sbellard }
236113eb76e0Sbellard 
2362e3db7226Sbellard void dump_exec_info(FILE *f,
2363e3db7226Sbellard                     int (*cpu_fprintf)(FILE *f, const char *fmt, ...))
2364e3db7226Sbellard {
2365e3db7226Sbellard     int i, target_code_size, max_target_code_size;
2366e3db7226Sbellard     int direct_jmp_count, direct_jmp2_count, cross_page;
2367e3db7226Sbellard     TranslationBlock *tb;
2368e3db7226Sbellard 
2369e3db7226Sbellard     target_code_size = 0;
2370e3db7226Sbellard     max_target_code_size = 0;
2371e3db7226Sbellard     cross_page = 0;
2372e3db7226Sbellard     direct_jmp_count = 0;
2373e3db7226Sbellard     direct_jmp2_count = 0;
2374e3db7226Sbellard     for(i = 0; i < nb_tbs; i++) {
2375e3db7226Sbellard         tb = &tbs[i];
2376e3db7226Sbellard         target_code_size += tb->size;
2377e3db7226Sbellard         if (tb->size > max_target_code_size)
2378e3db7226Sbellard             max_target_code_size = tb->size;
2379e3db7226Sbellard         if (tb->page_addr[1] != -1)
2380e3db7226Sbellard             cross_page++;
2381e3db7226Sbellard         if (tb->tb_next_offset[0] != 0xffff) {
2382e3db7226Sbellard             direct_jmp_count++;
2383e3db7226Sbellard             if (tb->tb_next_offset[1] != 0xffff) {
2384e3db7226Sbellard                 direct_jmp2_count++;
2385e3db7226Sbellard             }
2386e3db7226Sbellard         }
2387e3db7226Sbellard     }
2388e3db7226Sbellard     /* XXX: avoid using doubles ? */
2389e3db7226Sbellard     cpu_fprintf(f, "TB count            %d\n", nb_tbs);
2390e3db7226Sbellard     cpu_fprintf(f, "TB avg target size  %d max=%d bytes\n",
2391e3db7226Sbellard                 nb_tbs ? target_code_size / nb_tbs : 0,
2392e3db7226Sbellard                 max_target_code_size);
2393e3db7226Sbellard     cpu_fprintf(f, "TB avg host size    %d bytes (expansion ratio: %0.1f)\n",
2394e3db7226Sbellard                 nb_tbs ? (code_gen_ptr - code_gen_buffer) / nb_tbs : 0,
2395e3db7226Sbellard                 target_code_size ? (double) (code_gen_ptr - code_gen_buffer) / target_code_size : 0);
2396e3db7226Sbellard     cpu_fprintf(f, "cross page TB count %d (%d%%)\n",
2397e3db7226Sbellard             cross_page,
2398e3db7226Sbellard             nb_tbs ? (cross_page * 100) / nb_tbs : 0);
2399e3db7226Sbellard     cpu_fprintf(f, "direct jump count   %d (%d%%) (2 jumps=%d %d%%)\n",
2400e3db7226Sbellard                 direct_jmp_count,
2401e3db7226Sbellard                 nb_tbs ? (direct_jmp_count * 100) / nb_tbs : 0,
2402e3db7226Sbellard                 direct_jmp2_count,
2403e3db7226Sbellard                 nb_tbs ? (direct_jmp2_count * 100) / nb_tbs : 0);
2404e3db7226Sbellard     cpu_fprintf(f, "TB flush count      %d\n", tb_flush_count);
2405e3db7226Sbellard     cpu_fprintf(f, "TB invalidate count %d\n", tb_phys_invalidate_count);
2406e3db7226Sbellard     cpu_fprintf(f, "TLB flush count     %d\n", tlb_flush_count);
2407e3db7226Sbellard }
2408e3db7226Sbellard 
240961382a50Sbellard #if !defined(CONFIG_USER_ONLY)
241061382a50Sbellard 
241161382a50Sbellard #define MMUSUFFIX _cmmu
241261382a50Sbellard #define GETPC() NULL
241361382a50Sbellard #define env cpu_single_env
2414b769d8feSbellard #define SOFTMMU_CODE_ACCESS
241561382a50Sbellard 
241661382a50Sbellard #define SHIFT 0
241761382a50Sbellard #include "softmmu_template.h"
241861382a50Sbellard 
241961382a50Sbellard #define SHIFT 1
242061382a50Sbellard #include "softmmu_template.h"
242161382a50Sbellard 
242261382a50Sbellard #define SHIFT 2
242361382a50Sbellard #include "softmmu_template.h"
242461382a50Sbellard 
242561382a50Sbellard #define SHIFT 3
242661382a50Sbellard #include "softmmu_template.h"
242761382a50Sbellard 
242861382a50Sbellard #undef env
242961382a50Sbellard 
243061382a50Sbellard #endif
2431