xref: /qemu/system/physmem.c (revision bedb69ea0453a65a1c5a7b159ab485c542ecd15c)
154936004Sbellard /*
2fd6ce8f6Sbellard  *  virtual page mapping and translated block handling
354936004Sbellard  *
454936004Sbellard  *  Copyright (c) 2003 Fabrice Bellard
554936004Sbellard  *
654936004Sbellard  * This library is free software; you can redistribute it and/or
754936004Sbellard  * modify it under the terms of the GNU Lesser General Public
854936004Sbellard  * License as published by the Free Software Foundation; either
954936004Sbellard  * version 2 of the License, or (at your option) any later version.
1054936004Sbellard  *
1154936004Sbellard  * This library is distributed in the hope that it will be useful,
1254936004Sbellard  * but WITHOUT ANY WARRANTY; without even the implied warranty of
1354936004Sbellard  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
1454936004Sbellard  * Lesser General Public License for more details.
1554936004Sbellard  *
1654936004Sbellard  * You should have received a copy of the GNU Lesser General Public
1754936004Sbellard  * License along with this library; if not, write to the Free Software
1854936004Sbellard  * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
1954936004Sbellard  */
2067b915a5Sbellard #include "config.h"
21d5a8f07cSbellard #ifdef _WIN32
22d5a8f07cSbellard #include <windows.h>
23d5a8f07cSbellard #else
24a98d49b1Sbellard #include <sys/types.h>
25d5a8f07cSbellard #include <sys/mman.h>
26d5a8f07cSbellard #endif
2754936004Sbellard #include <stdlib.h>
2854936004Sbellard #include <stdio.h>
2954936004Sbellard #include <stdarg.h>
3054936004Sbellard #include <string.h>
3154936004Sbellard #include <errno.h>
3254936004Sbellard #include <unistd.h>
3354936004Sbellard #include <inttypes.h>
3454936004Sbellard 
356180a181Sbellard #include "cpu.h"
366180a181Sbellard #include "exec-all.h"
3753a5960aSpbrook #if defined(CONFIG_USER_ONLY)
3853a5960aSpbrook #include <qemu.h>
3953a5960aSpbrook #endif
4054936004Sbellard 
41fd6ce8f6Sbellard //#define DEBUG_TB_INVALIDATE
4266e85a21Sbellard //#define DEBUG_FLUSH
439fa3e853Sbellard //#define DEBUG_TLB
4467d3b957Spbrook //#define DEBUG_UNASSIGNED
45fd6ce8f6Sbellard 
46fd6ce8f6Sbellard /* make various TB consistency checks */
47fd6ce8f6Sbellard //#define DEBUG_TB_CHECK
4898857888Sbellard //#define DEBUG_TLB_CHECK
49fd6ce8f6Sbellard 
501196be37Sths //#define DEBUG_IOPORT
511196be37Sths 
5299773bd4Spbrook #if !defined(CONFIG_USER_ONLY)
5399773bd4Spbrook /* TB consistency checks only implemented for usermode emulation.  */
5499773bd4Spbrook #undef DEBUG_TB_CHECK
5599773bd4Spbrook #endif
5699773bd4Spbrook 
57fd6ce8f6Sbellard /* threshold to flush the translated code buffer */
58fd6ce8f6Sbellard #define CODE_GEN_BUFFER_MAX_SIZE (CODE_GEN_BUFFER_SIZE - CODE_GEN_MAX_SIZE)
59fd6ce8f6Sbellard 
609fa3e853Sbellard #define SMC_BITMAP_USE_THRESHOLD 10
619fa3e853Sbellard 
629fa3e853Sbellard #define MMAP_AREA_START        0x00000000
639fa3e853Sbellard #define MMAP_AREA_END          0xa8000000
64fd6ce8f6Sbellard 
65108c49b8Sbellard #if defined(TARGET_SPARC64)
66108c49b8Sbellard #define TARGET_PHYS_ADDR_SPACE_BITS 41
67bedb69eaSj_mayer #elif defined(TARGET_ALPHA)
68bedb69eaSj_mayer #define TARGET_PHYS_ADDR_SPACE_BITS 42
69bedb69eaSj_mayer #define TARGET_VIRT_ADDR_SPACE_BITS 42
70108c49b8Sbellard #elif defined(TARGET_PPC64)
71108c49b8Sbellard #define TARGET_PHYS_ADDR_SPACE_BITS 42
72108c49b8Sbellard #else
73108c49b8Sbellard /* Note: for compatibility with kqemu, we use 32 bits for x86_64 */
74108c49b8Sbellard #define TARGET_PHYS_ADDR_SPACE_BITS 32
75108c49b8Sbellard #endif
76108c49b8Sbellard 
77fd6ce8f6Sbellard TranslationBlock tbs[CODE_GEN_MAX_BLOCKS];
789fa3e853Sbellard TranslationBlock *tb_phys_hash[CODE_GEN_PHYS_HASH_SIZE];
79fd6ce8f6Sbellard int nb_tbs;
80eb51d102Sbellard /* any access to the tbs or the page table must use this lock */
81eb51d102Sbellard spinlock_t tb_lock = SPIN_LOCK_UNLOCKED;
82fd6ce8f6Sbellard 
83b8076a74Sbellard uint8_t code_gen_buffer[CODE_GEN_BUFFER_SIZE] __attribute__((aligned (32)));
84fd6ce8f6Sbellard uint8_t *code_gen_ptr;
85fd6ce8f6Sbellard 
869fa3e853Sbellard int phys_ram_size;
879fa3e853Sbellard int phys_ram_fd;
889fa3e853Sbellard uint8_t *phys_ram_base;
891ccde1cbSbellard uint8_t *phys_ram_dirty;
90e9a1ab19Sbellard static ram_addr_t phys_ram_alloc_offset = 0;
919fa3e853Sbellard 
926a00d601Sbellard CPUState *first_cpu;
936a00d601Sbellard /* current CPU in the current thread. It is only valid inside
946a00d601Sbellard    cpu_exec() */
956a00d601Sbellard CPUState *cpu_single_env;
966a00d601Sbellard 
/* Per target page bookkeeping for translated code (one entry per
   touched virtual page, allocated lazily by page_find_alloc()). */
9754936004Sbellard typedef struct PageDesc {
9892e873b9Sbellard     /* list of TBs intersecting this ram page */
99fd6ce8f6Sbellard     TranslationBlock *first_tb;
1009fa3e853Sbellard     /* in order to optimize self modifying code, we count the number
1019fa3e853Sbellard        of lookups we do to a given page to use a bitmap */
1029fa3e853Sbellard     unsigned int code_write_count;
    /* one bit per page byte covered by a TB; built lazily by
       build_page_bitmap(), freed by invalidate_page_bitmap() */
1039fa3e853Sbellard     uint8_t *code_bitmap;
1049fa3e853Sbellard #if defined(CONFIG_USER_ONLY)
    /* NOTE(review): presumably PAGE_* protection bits — confirm
       against page_get_flags()/page_set_flags() callers */
1059fa3e853Sbellard     unsigned long flags;
1069fa3e853Sbellard #endif
10754936004Sbellard } PageDesc;
10854936004Sbellard 
/* Per physical page descriptor, stored in the (one or two level)
   l1_phys_map table managed by phys_page_find_alloc(). */
10992e873b9Sbellard typedef struct PhysPageDesc {
11092e873b9Sbellard     /* offset in host memory of the page + io_index in the low 12 bits */
111e04f40b5Sbellard     uint32_t phys_offset;
11292e873b9Sbellard } PhysPageDesc;
11392e873b9Sbellard 
11454936004Sbellard #define L2_BITS 10
115bedb69eaSj_mayer #if defined(CONFIG_USER_ONLY) && defined(TARGET_VIRT_ADDR_SPACE_BITS)
116bedb69eaSj_mayer /* XXX: this is a temporary hack for alpha target.
117bedb69eaSj_mayer  *      In the future, this is to be replaced by a multi-level table
118bedb69eaSj_mayer  *      to actually be able to handle the complete 64 bits address space.
119bedb69eaSj_mayer  */
120bedb69eaSj_mayer #define L1_BITS (TARGET_VIRT_ADDR_SPACE_BITS - L2_BITS - TARGET_PAGE_BITS)
121bedb69eaSj_mayer #else
12254936004Sbellard #define L1_BITS (32 - L2_BITS - TARGET_PAGE_BITS)
123bedb69eaSj_mayer #endif
12454936004Sbellard 
12554936004Sbellard #define L1_SIZE (1 << L1_BITS)
12654936004Sbellard #define L2_SIZE (1 << L2_BITS)
12754936004Sbellard 
12833417e70Sbellard static void io_mem_init(void);
129fd6ce8f6Sbellard 
13083fb7adfSbellard unsigned long qemu_real_host_page_size;
13183fb7adfSbellard unsigned long qemu_host_page_bits;
13283fb7adfSbellard unsigned long qemu_host_page_size;
13383fb7adfSbellard unsigned long qemu_host_page_mask;
13454936004Sbellard 
13592e873b9Sbellard /* XXX: for system emulation, it could just be an array */
13654936004Sbellard static PageDesc *l1_map[L1_SIZE];
1370a962c02Sbellard PhysPageDesc **l1_phys_map;
13854936004Sbellard 
13933417e70Sbellard /* io memory support */
14033417e70Sbellard CPUWriteMemoryFunc *io_mem_write[IO_MEM_NB_ENTRIES][4];
14133417e70Sbellard CPUReadMemoryFunc *io_mem_read[IO_MEM_NB_ENTRIES][4];
142a4193c8aSbellard void *io_mem_opaque[IO_MEM_NB_ENTRIES];
14333417e70Sbellard static int io_mem_nb;
1446658ffb8Spbrook #if defined(CONFIG_SOFTMMU)
1456658ffb8Spbrook static int io_mem_watch;
1466658ffb8Spbrook #endif
14733417e70Sbellard 
14834865134Sbellard /* log support */
14934865134Sbellard char *logfilename = "/tmp/qemu.log";
15034865134Sbellard FILE *logfile;
15134865134Sbellard int loglevel;
15234865134Sbellard 
153e3db7226Sbellard /* statistics */
154e3db7226Sbellard static int tlb_flush_count;
155e3db7226Sbellard static int tb_flush_count;
156e3db7226Sbellard static int tb_phys_invalidate_count;
157e3db7226Sbellard 
/* One-time host page setup.
 *
 * Determines the real host page size, makes the static translated-code
 * buffer executable (VirtualProtect on win32, mprotect elsewhere),
 * derives qemu_host_page_{size,bits,mask}, and allocates the zeroed
 * first level of the physical page table (l1_phys_map).
 */
158b346ff46Sbellard static void page_init(void)
15954936004Sbellard {
16083fb7adfSbellard     /* NOTE: we can always suppose that qemu_host_page_size >=
16154936004Sbellard        TARGET_PAGE_SIZE */
16267b915a5Sbellard #ifdef _WIN32
163d5a8f07cSbellard     {
164d5a8f07cSbellard         SYSTEM_INFO system_info;
165d5a8f07cSbellard         DWORD old_protect;
166d5a8f07cSbellard 
167d5a8f07cSbellard         GetSystemInfo(&system_info);
168d5a8f07cSbellard         qemu_real_host_page_size = system_info.dwPageSize;
169d5a8f07cSbellard 
        /* make the static code generation buffer executable */
170d5a8f07cSbellard         VirtualProtect(code_gen_buffer, sizeof(code_gen_buffer),
171d5a8f07cSbellard                        PAGE_EXECUTE_READWRITE, &old_protect);
172d5a8f07cSbellard     }
17367b915a5Sbellard #else
17483fb7adfSbellard     qemu_real_host_page_size = getpagesize();
175d5a8f07cSbellard     {
176d5a8f07cSbellard         unsigned long start, end;
177d5a8f07cSbellard 
        /* round the buffer down/up to host page boundaries: mprotect
           operates on whole pages */
178d5a8f07cSbellard         start = (unsigned long)code_gen_buffer;
179d5a8f07cSbellard         start &= ~(qemu_real_host_page_size - 1);
180d5a8f07cSbellard 
181d5a8f07cSbellard         end = (unsigned long)code_gen_buffer + sizeof(code_gen_buffer);
182d5a8f07cSbellard         end += qemu_real_host_page_size - 1;
183d5a8f07cSbellard         end &= ~(qemu_real_host_page_size - 1);
184d5a8f07cSbellard 
185d5a8f07cSbellard         mprotect((void *)start, end - start,
186d5a8f07cSbellard                  PROT_READ | PROT_WRITE | PROT_EXEC);
187d5a8f07cSbellard     }
18867b915a5Sbellard #endif
189d5a8f07cSbellard 
    /* if not already forced elsewhere, default to the real host page
       size, but never below the target page size */
19083fb7adfSbellard     if (qemu_host_page_size == 0)
19183fb7adfSbellard         qemu_host_page_size = qemu_real_host_page_size;
19283fb7adfSbellard     if (qemu_host_page_size < TARGET_PAGE_SIZE)
19383fb7adfSbellard         qemu_host_page_size = TARGET_PAGE_SIZE;
19483fb7adfSbellard     qemu_host_page_bits = 0;
19583fb7adfSbellard     while ((1 << qemu_host_page_bits) < qemu_host_page_size)
19683fb7adfSbellard         qemu_host_page_bits++;
19783fb7adfSbellard     qemu_host_page_mask = ~(qemu_host_page_size - 1);
    /* first level of the physical page table, zeroed (= unmapped) */
198108c49b8Sbellard     l1_phys_map = qemu_vmalloc(L1_SIZE * sizeof(void *));
199108c49b8Sbellard     memset(l1_phys_map, 0, L1_SIZE * sizeof(void *));
20054936004Sbellard }
20154936004Sbellard 
202fd6ce8f6Sbellard static inline PageDesc *page_find_alloc(unsigned int index)
20354936004Sbellard {
20454936004Sbellard     PageDesc **lp, *p;
20554936004Sbellard 
20654936004Sbellard     lp = &l1_map[index >> L2_BITS];
20754936004Sbellard     p = *lp;
20854936004Sbellard     if (!p) {
20954936004Sbellard         /* allocate if not found */
21059817ccbSbellard         p = qemu_malloc(sizeof(PageDesc) * L2_SIZE);
211fd6ce8f6Sbellard         memset(p, 0, sizeof(PageDesc) * L2_SIZE);
21254936004Sbellard         *lp = p;
21354936004Sbellard     }
21454936004Sbellard     return p + (index & (L2_SIZE - 1));
21554936004Sbellard }
21654936004Sbellard 
217fd6ce8f6Sbellard static inline PageDesc *page_find(unsigned int index)
21854936004Sbellard {
21954936004Sbellard     PageDesc *p;
22054936004Sbellard 
22154936004Sbellard     p = l1_map[index >> L2_BITS];
22254936004Sbellard     if (!p)
22354936004Sbellard         return 0;
224fd6ce8f6Sbellard     return p + (index & (L2_SIZE - 1));
22554936004Sbellard }
22654936004Sbellard 
/* Look up (and optionally create) the PhysPageDesc of physical page
 * 'index'.  The table has a single level for <= 32-bit physical
 * address spaces and gains an extra top level otherwise.  Returns
 * NULL when the entry is absent and 'alloc' is zero.  Freshly
 * allocated leaf entries start as IO_MEM_UNASSIGNED.
 */
227108c49b8Sbellard static PhysPageDesc *phys_page_find_alloc(target_phys_addr_t index, int alloc)
22892e873b9Sbellard {
229108c49b8Sbellard     void **lp, **p;
230e3f4e2a4Spbrook     PhysPageDesc *pd;
23192e873b9Sbellard 
232108c49b8Sbellard     p = (void **)l1_phys_map;
233108c49b8Sbellard #if TARGET_PHYS_ADDR_SPACE_BITS > 32
234108c49b8Sbellard 
235108c49b8Sbellard #if TARGET_PHYS_ADDR_SPACE_BITS > (32 + L1_BITS)
236108c49b8Sbellard #error unsupported TARGET_PHYS_ADDR_SPACE_BITS
237108c49b8Sbellard #endif
    /* extra top level indexed by the address bits above 32 */
238108c49b8Sbellard     lp = p + ((index >> (L1_BITS + L2_BITS)) & (L1_SIZE - 1));
23992e873b9Sbellard     p = *lp;
24092e873b9Sbellard     if (!p) {
24192e873b9Sbellard         /* allocate if not found */
242108c49b8Sbellard         if (!alloc)
243108c49b8Sbellard             return NULL;
244108c49b8Sbellard         p = qemu_vmalloc(sizeof(void *) * L1_SIZE);
245108c49b8Sbellard         memset(p, 0, sizeof(void *) * L1_SIZE);
246108c49b8Sbellard         *lp = p;
247108c49b8Sbellard     }
248108c49b8Sbellard #endif
249108c49b8Sbellard     lp = p + ((index >> L2_BITS) & (L1_SIZE - 1));
250e3f4e2a4Spbrook     pd = *lp;
251e3f4e2a4Spbrook     if (!pd) {
252e3f4e2a4Spbrook         int i;
253108c49b8Sbellard         /* allocate if not found */
254108c49b8Sbellard         if (!alloc)
255108c49b8Sbellard             return NULL;
256e3f4e2a4Spbrook         pd = qemu_vmalloc(sizeof(PhysPageDesc) * L2_SIZE);
257e3f4e2a4Spbrook         *lp = pd;
        /* new pages are unassigned I/O until explicitly registered */
258e3f4e2a4Spbrook         for (i = 0; i < L2_SIZE; i++)
259e3f4e2a4Spbrook           pd[i].phys_offset = IO_MEM_UNASSIGNED;
26092e873b9Sbellard     }
261e3f4e2a4Spbrook     return ((PhysPageDesc *)pd) + (index & (L2_SIZE - 1));
26292e873b9Sbellard }
26392e873b9Sbellard 
264108c49b8Sbellard static inline PhysPageDesc *phys_page_find(target_phys_addr_t index)
26592e873b9Sbellard {
266108c49b8Sbellard     return phys_page_find_alloc(index, 0);
26792e873b9Sbellard }
26892e873b9Sbellard 
2699fa3e853Sbellard #if !defined(CONFIG_USER_ONLY)
2706a00d601Sbellard static void tlb_protect_code(ram_addr_t ram_addr);
2713a7d929eSbellard static void tlb_unprotect_code_phys(CPUState *env, ram_addr_t ram_addr,
2723a7d929eSbellard                                     target_ulong vaddr);
2739fa3e853Sbellard #endif
274fd6ce8f6Sbellard 
2756a00d601Sbellard void cpu_exec_init(CPUState *env)
276fd6ce8f6Sbellard {
2776a00d601Sbellard     CPUState **penv;
2786a00d601Sbellard     int cpu_index;
2796a00d601Sbellard 
280fd6ce8f6Sbellard     if (!code_gen_ptr) {
281fd6ce8f6Sbellard         code_gen_ptr = code_gen_buffer;
282b346ff46Sbellard         page_init();
28333417e70Sbellard         io_mem_init();
284fd6ce8f6Sbellard     }
2856a00d601Sbellard     env->next_cpu = NULL;
2866a00d601Sbellard     penv = &first_cpu;
2876a00d601Sbellard     cpu_index = 0;
2886a00d601Sbellard     while (*penv != NULL) {
2896a00d601Sbellard         penv = (CPUState **)&(*penv)->next_cpu;
2906a00d601Sbellard         cpu_index++;
2916a00d601Sbellard     }
2926a00d601Sbellard     env->cpu_index = cpu_index;
2936658ffb8Spbrook     env->nb_watchpoints = 0;
2946a00d601Sbellard     *penv = env;
295fd6ce8f6Sbellard }
296fd6ce8f6Sbellard 
2979fa3e853Sbellard static inline void invalidate_page_bitmap(PageDesc *p)
2989fa3e853Sbellard {
2999fa3e853Sbellard     if (p->code_bitmap) {
30059817ccbSbellard         qemu_free(p->code_bitmap);
3019fa3e853Sbellard         p->code_bitmap = NULL;
3029fa3e853Sbellard     }
3039fa3e853Sbellard     p->code_write_count = 0;
3049fa3e853Sbellard }
3059fa3e853Sbellard 
306fd6ce8f6Sbellard /* set to NULL all the 'first_tb' fields in all PageDescs */
307fd6ce8f6Sbellard static void page_flush_tb(void)
308fd6ce8f6Sbellard {
309fd6ce8f6Sbellard     int i, j;
310fd6ce8f6Sbellard     PageDesc *p;
311fd6ce8f6Sbellard 
312fd6ce8f6Sbellard     for(i = 0; i < L1_SIZE; i++) {
313fd6ce8f6Sbellard         p = l1_map[i];
314fd6ce8f6Sbellard         if (p) {
3159fa3e853Sbellard             for(j = 0; j < L2_SIZE; j++) {
3169fa3e853Sbellard                 p->first_tb = NULL;
3179fa3e853Sbellard                 invalidate_page_bitmap(p);
3189fa3e853Sbellard                 p++;
3199fa3e853Sbellard             }
320fd6ce8f6Sbellard         }
321fd6ce8f6Sbellard     }
322fd6ce8f6Sbellard }
323fd6ce8f6Sbellard 
324fd6ce8f6Sbellard /* flush all the translation blocks */
325d4e8164fSbellard /* XXX: tb_flush is currently not thread safe */
/* Discards every translated block: clears each CPU's tb_jmp_cache,
   the physical hash table and all per-page TB lists, then rewinds the
   code generation buffer.  'env1' itself is not referenced: every CPU
   on the global list is reset. */
3266a00d601Sbellard void tb_flush(CPUState *env1)
327fd6ce8f6Sbellard {
3286a00d601Sbellard     CPUState *env;
3290124311eSbellard #if defined(DEBUG_FLUSH)
330fd6ce8f6Sbellard     printf("qemu: flush code_size=%d nb_tbs=%d avg_tb_size=%d\n",
331fd6ce8f6Sbellard            code_gen_ptr - code_gen_buffer,
332fd6ce8f6Sbellard            nb_tbs,
3330124311eSbellard            nb_tbs > 0 ? (code_gen_ptr - code_gen_buffer) / nb_tbs : 0);
334fd6ce8f6Sbellard #endif
335fd6ce8f6Sbellard     nb_tbs = 0;
3366a00d601Sbellard 
    /* invalidate every CPU's virtual-pc -> TB lookup cache */
3376a00d601Sbellard     for(env = first_cpu; env != NULL; env = env->next_cpu) {
3388a40a180Sbellard         memset (env->tb_jmp_cache, 0, TB_JMP_CACHE_SIZE * sizeof (void *));
3396a00d601Sbellard     }
3409fa3e853Sbellard 
3418a8a608fSbellard     memset (tb_phys_hash, 0, CODE_GEN_PHYS_HASH_SIZE * sizeof (void *));
342fd6ce8f6Sbellard     page_flush_tb();
3439fa3e853Sbellard 
    /* the whole code buffer can be reused from the start */
344fd6ce8f6Sbellard     code_gen_ptr = code_gen_buffer;
345d4e8164fSbellard     /* XXX: flush processor icache at this point if cache flush is
346d4e8164fSbellard        expensive */
347e3db7226Sbellard     tb_flush_count++;
348fd6ce8f6Sbellard }
349fd6ce8f6Sbellard 
350fd6ce8f6Sbellard #ifdef DEBUG_TB_CHECK
351fd6ce8f6Sbellard 
/* Debug helper (DEBUG_TB_CHECK): after invalidating the page that
   contains 'address', verify that no TB left in the physical hash
   table still overlaps that page; prints an error otherwise. */
352bc98a7efSj_mayer static void tb_invalidate_check(target_ulong address)
353fd6ce8f6Sbellard {
354fd6ce8f6Sbellard     TranslationBlock *tb;
355fd6ce8f6Sbellard     int i;
356fd6ce8f6Sbellard     address &= TARGET_PAGE_MASK;
35799773bd4Spbrook     for(i = 0;i < CODE_GEN_PHYS_HASH_SIZE; i++) {
35899773bd4Spbrook         for(tb = tb_phys_hash[i]; tb != NULL; tb = tb->phys_hash_next) {
            /* interval overlap test: [pc, pc+size) vs the page */
359fd6ce8f6Sbellard             if (!(address + TARGET_PAGE_SIZE <= tb->pc ||
360fd6ce8f6Sbellard                   address >= tb->pc + tb->size)) {
361fd6ce8f6Sbellard                 printf("ERROR invalidate: address=%08lx PC=%08lx size=%04x\n",
36299773bd4Spbrook                        address, (long)tb->pc, tb->size);
363fd6ce8f6Sbellard             }
364fd6ce8f6Sbellard         }
365fd6ce8f6Sbellard     }
366fd6ce8f6Sbellard }
367fd6ce8f6Sbellard 
368fd6ce8f6Sbellard /* verify that all the pages have correct rights for code */
369fd6ce8f6Sbellard static void tb_page_check(void)
370fd6ce8f6Sbellard {
371fd6ce8f6Sbellard     TranslationBlock *tb;
372fd6ce8f6Sbellard     int i, flags1, flags2;
373fd6ce8f6Sbellard 
37499773bd4Spbrook     for(i = 0;i < CODE_GEN_PHYS_HASH_SIZE; i++) {
37599773bd4Spbrook         for(tb = tb_phys_hash[i]; tb != NULL; tb = tb->phys_hash_next) {
376fd6ce8f6Sbellard             flags1 = page_get_flags(tb->pc);
377fd6ce8f6Sbellard             flags2 = page_get_flags(tb->pc + tb->size - 1);
378fd6ce8f6Sbellard             if ((flags1 & PAGE_WRITE) || (flags2 & PAGE_WRITE)) {
379fd6ce8f6Sbellard                 printf("ERROR page flags: PC=%08lx size=%04x f1=%x f2=%x\n",
38099773bd4Spbrook                        (long)tb->pc, tb->size, flags1, flags2);
381fd6ce8f6Sbellard             }
382fd6ce8f6Sbellard         }
383fd6ce8f6Sbellard     }
384fd6ce8f6Sbellard }
385fd6ce8f6Sbellard 
/* Debug helper (DEBUG_TB_CHECK): follow the circular list of incoming
   jumps starting at tb->jmp_first and check that it ends on the entry
   tagged 2, which must be 'tb' itself.  The low 2 bits of each link
   encode the jump slot (0/1) or the end-of-list marker (2). */
386d4e8164fSbellard void tb_jmp_check(TranslationBlock *tb)
387d4e8164fSbellard {
388d4e8164fSbellard     TranslationBlock *tb1;
389d4e8164fSbellard     unsigned int n1;
390d4e8164fSbellard 
391d4e8164fSbellard     /* walk the whole jump list without modifying it */
392d4e8164fSbellard     tb1 = tb->jmp_first;
393d4e8164fSbellard     for(;;) {
        /* untag the pointer: low 2 bits carry the slot number */
394d4e8164fSbellard         n1 = (long)tb1 & 3;
395d4e8164fSbellard         tb1 = (TranslationBlock *)((long)tb1 & ~3);
396d4e8164fSbellard         if (n1 == 2)
397d4e8164fSbellard             break;
398d4e8164fSbellard         tb1 = tb1->jmp_next[n1];
399d4e8164fSbellard     }
400d4e8164fSbellard     /* check end of list */
401d4e8164fSbellard     if (tb1 != tb) {
402d4e8164fSbellard         printf("ERROR: jmp_list from 0x%08lx\n", (long)tb);
403d4e8164fSbellard     }
404d4e8164fSbellard }
405d4e8164fSbellard 
406fd6ce8f6Sbellard #endif
407fd6ce8f6Sbellard 
408fd6ce8f6Sbellard /* invalidate one TB */
409fd6ce8f6Sbellard static inline void tb_remove(TranslationBlock **ptb, TranslationBlock *tb,
410fd6ce8f6Sbellard                              int next_offset)
411fd6ce8f6Sbellard {
412fd6ce8f6Sbellard     TranslationBlock *tb1;
413fd6ce8f6Sbellard     for(;;) {
414fd6ce8f6Sbellard         tb1 = *ptb;
415fd6ce8f6Sbellard         if (tb1 == tb) {
416fd6ce8f6Sbellard             *ptb = *(TranslationBlock **)((char *)tb1 + next_offset);
417fd6ce8f6Sbellard             break;
418fd6ce8f6Sbellard         }
419fd6ce8f6Sbellard         ptb = (TranslationBlock **)((char *)tb1 + next_offset);
420fd6ce8f6Sbellard     }
421fd6ce8f6Sbellard }
422fd6ce8f6Sbellard 
/* Unlink 'tb' from the per-page TB list whose head is *ptb.  Each
   link carries, in its low 2 bits, the page slot (0 or 1) of the TB
   it points to; that slot selects which page_next[] entry continues
   or splices the chain. */
4239fa3e853Sbellard static inline void tb_page_remove(TranslationBlock **ptb, TranslationBlock *tb)
4249fa3e853Sbellard {
4259fa3e853Sbellard     TranslationBlock *tb1;
4269fa3e853Sbellard     unsigned int n1;
4279fa3e853Sbellard 
4289fa3e853Sbellard     for(;;) {
4299fa3e853Sbellard         tb1 = *ptb;
        /* strip the slot tag before comparing pointers */
4309fa3e853Sbellard         n1 = (long)tb1 & 3;
4319fa3e853Sbellard         tb1 = (TranslationBlock *)((long)tb1 & ~3);
4329fa3e853Sbellard         if (tb1 == tb) {
4339fa3e853Sbellard             *ptb = tb1->page_next[n1];
4349fa3e853Sbellard             break;
4359fa3e853Sbellard         }
4369fa3e853Sbellard         ptb = &tb1->page_next[n1];
4379fa3e853Sbellard     }
4389fa3e853Sbellard }
4399fa3e853Sbellard 
/* Remove tb's outgoing jump slot 'n' from the circular list of jumps
   chained to the same destination TB.  Links are tagged in their low
   2 bits with the slot number; tag value 2 marks the destination TB
   itself, whose jmp_first continues the circle.  No-op when the jump
   is not currently chained (jmp_next[n] == NULL). */
440d4e8164fSbellard static inline void tb_jmp_remove(TranslationBlock *tb, int n)
441d4e8164fSbellard {
442d4e8164fSbellard     TranslationBlock *tb1, **ptb;
443d4e8164fSbellard     unsigned int n1;
444d4e8164fSbellard 
445d4e8164fSbellard     ptb = &tb->jmp_next[n];
446d4e8164fSbellard     tb1 = *ptb;
447d4e8164fSbellard     if (tb1) {
448d4e8164fSbellard         /* find tb(n) in circular list */
449d4e8164fSbellard         for(;;) {
450d4e8164fSbellard             tb1 = *ptb;
451d4e8164fSbellard             n1 = (long)tb1 & 3;
452d4e8164fSbellard             tb1 = (TranslationBlock *)((long)tb1 & ~3);
453d4e8164fSbellard             if (n1 == n && tb1 == tb)
454d4e8164fSbellard                 break;
455d4e8164fSbellard             if (n1 == 2) {
                /* reached the destination TB: follow its jmp_first */
456d4e8164fSbellard                 ptb = &tb1->jmp_first;
457d4e8164fSbellard             } else {
458d4e8164fSbellard                 ptb = &tb1->jmp_next[n1];
459d4e8164fSbellard             }
460d4e8164fSbellard         }
461d4e8164fSbellard         /* now we can suppress tb(n) from the list */
462d4e8164fSbellard         *ptb = tb->jmp_next[n];
463d4e8164fSbellard 
464d4e8164fSbellard         tb->jmp_next[n] = NULL;
465d4e8164fSbellard     }
466d4e8164fSbellard }
467d4e8164fSbellard 
468d4e8164fSbellard /* reset the jump entry 'n' of a TB so that it is not chained to
469d4e8164fSbellard    another TB */
470d4e8164fSbellard static inline void tb_reset_jump(TranslationBlock *tb, int n)
471d4e8164fSbellard {
472d4e8164fSbellard     tb_set_jmp_target(tb, n, (unsigned long)(tb->tc_ptr + tb->tb_next_offset[n]));
473d4e8164fSbellard }
474d4e8164fSbellard 
/* Invalidate one TB: unlink it from the physical hash table, from the
 * per-page TB lists, from every CPU's tb_jmp_cache and from the
 * chained-jump lists, then redirect every TB that jumped into it.
 * NOTE(review): the list of the page equal to 'page_addr' is skipped
 * here — presumably that page's list is reset by the caller (see
 * tb_invalidate_phys_page_range); confirm.
 */
4759fa3e853Sbellard static inline void tb_phys_invalidate(TranslationBlock *tb, unsigned int page_addr)
476fd6ce8f6Sbellard {
4776a00d601Sbellard     CPUState *env;
478fd6ce8f6Sbellard     PageDesc *p;
4798a40a180Sbellard     unsigned int h, n1;
4809fa3e853Sbellard     target_ulong phys_pc;
4818a40a180Sbellard     TranslationBlock *tb1, *tb2;
482fd6ce8f6Sbellard 
4839fa3e853Sbellard     /* remove the TB from the hash list */
4849fa3e853Sbellard     phys_pc = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
4859fa3e853Sbellard     h = tb_phys_hash_func(phys_pc);
4869fa3e853Sbellard     tb_remove(&tb_phys_hash[h], tb,
4879fa3e853Sbellard               offsetof(TranslationBlock, phys_hash_next));
4889fa3e853Sbellard 
4899fa3e853Sbellard     /* remove the TB from the page list */
4909fa3e853Sbellard     if (tb->page_addr[0] != page_addr) {
4919fa3e853Sbellard         p = page_find(tb->page_addr[0] >> TARGET_PAGE_BITS);
4929fa3e853Sbellard         tb_page_remove(&p->first_tb, tb);
4939fa3e853Sbellard         invalidate_page_bitmap(p);
4949fa3e853Sbellard     }
    /* page_addr[1] == -1 means the TB does not span a second page */
4959fa3e853Sbellard     if (tb->page_addr[1] != -1 && tb->page_addr[1] != page_addr) {
4969fa3e853Sbellard         p = page_find(tb->page_addr[1] >> TARGET_PAGE_BITS);
4979fa3e853Sbellard         tb_page_remove(&p->first_tb, tb);
4989fa3e853Sbellard         invalidate_page_bitmap(p);
4999fa3e853Sbellard     }
5009fa3e853Sbellard 
5018a40a180Sbellard     tb_invalidated_flag = 1;
5028a40a180Sbellard 
5038a40a180Sbellard     /* remove the TB from each CPU's virtual-pc lookup cache */
5048a40a180Sbellard     h = tb_jmp_cache_hash_func(tb->pc);
5056a00d601Sbellard     for(env = first_cpu; env != NULL; env = env->next_cpu) {
5066a00d601Sbellard         if (env->tb_jmp_cache[h] == tb)
5076a00d601Sbellard             env->tb_jmp_cache[h] = NULL;
5086a00d601Sbellard     }
5098a40a180Sbellard 
5108a40a180Sbellard     /* suppress this TB from the two jump lists */
5118a40a180Sbellard     tb_jmp_remove(tb, 0);
5128a40a180Sbellard     tb_jmp_remove(tb, 1);
5138a40a180Sbellard 
5148a40a180Sbellard     /* suppress any remaining jumps to this TB */
5158a40a180Sbellard     tb1 = tb->jmp_first;
5168a40a180Sbellard     for(;;) {
        /* low 2 bits of the link: jump slot (0/1) or end marker (2) */
5178a40a180Sbellard         n1 = (long)tb1 & 3;
5188a40a180Sbellard         if (n1 == 2)
5198a40a180Sbellard             break;
5208a40a180Sbellard         tb1 = (TranslationBlock *)((long)tb1 & ~3);
5218a40a180Sbellard         tb2 = tb1->jmp_next[n1];
5228a40a180Sbellard         tb_reset_jump(tb1, n1);
5238a40a180Sbellard         tb1->jmp_next[n1] = NULL;
5248a40a180Sbellard         tb1 = tb2;
5258a40a180Sbellard     }
5268a40a180Sbellard     tb->jmp_first = (TranslationBlock *)((long)tb | 2); /* fail safe */
5278a40a180Sbellard 
528e3db7226Sbellard     tb_phys_invalidate_count++;
5299fa3e853Sbellard }
5309fa3e853Sbellard 
/* Set 'len' consecutive bits starting at bit index 'start' in the
   bitmap 'tab'.  Bit k is bit (k & 7) of byte (k >> 3), i.e.
   LSB-first within each byte.  Assumes len >= 0. */
static inline void set_bits(uint8_t *tab, int start, int len)
{
    int end = start + len;
    uint8_t *p = tab + (start >> 3);

    if (start >= end)
        return;
    if ((start >> 3) == ((end - 1) >> 3)) {
        /* the whole range lives inside one byte */
        *p |= (0xff << (start & 7)) & ~(0xfe << ((end - 1) & 7));
        return;
    }
    /* head: from 'start' up to the end of its byte */
    *p++ |= 0xff << (start & 7);
    start = (start + 8) & ~7;
    /* body: whole bytes */
    while (start + 8 <= end) {
        *p++ = 0xff;
        start += 8;
    }
    /* tail: remaining low bits of the last byte, if any */
    if (start < end)
        *p |= ~(0xff << (end & 7)) & 0xff;
}
5579fa3e853Sbellard 
/* Build the self-modifying-code bitmap of page 'p': one bit per byte
 * of the page, set for every byte covered by a TB.  A TB may span two
 * pages; the low 2 bits of each list link select which of the TB's
 * two page slots this list entry belongs to (0 = first, 1 = second).
 */
5589fa3e853Sbellard static void build_page_bitmap(PageDesc *p)
5599fa3e853Sbellard {
5609fa3e853Sbellard     int n, tb_start, tb_end;
5619fa3e853Sbellard     TranslationBlock *tb;
5629fa3e853Sbellard 
5639fa3e853Sbellard     p->code_bitmap = qemu_malloc(TARGET_PAGE_SIZE / 8);
    /* allocation failure: silently keep running without a bitmap */
5649fa3e853Sbellard     if (!p->code_bitmap)
5659fa3e853Sbellard         return;
5669fa3e853Sbellard     memset(p->code_bitmap, 0, TARGET_PAGE_SIZE / 8);
5679fa3e853Sbellard 
5689fa3e853Sbellard     tb = p->first_tb;
5699fa3e853Sbellard     while (tb != NULL) {
        /* untag the link: low 2 bits are the page slot number */
5709fa3e853Sbellard         n = (long)tb & 3;
5719fa3e853Sbellard         tb = (TranslationBlock *)((long)tb & ~3);
5729fa3e853Sbellard         /* NOTE: this is subtle as a TB may span two physical pages */
5739fa3e853Sbellard         if (n == 0) {
5749fa3e853Sbellard             /* NOTE: tb_end may be after the end of the page, but
5759fa3e853Sbellard                it is not a problem */
5769fa3e853Sbellard             tb_start = tb->pc & ~TARGET_PAGE_MASK;
5779fa3e853Sbellard             tb_end = tb_start + tb->size;
5789fa3e853Sbellard             if (tb_end > TARGET_PAGE_SIZE)
5799fa3e853Sbellard                 tb_end = TARGET_PAGE_SIZE;
5809fa3e853Sbellard         } else {
            /* second page of the TB: it covers [0, end-of-TB) here */
5819fa3e853Sbellard             tb_start = 0;
5829fa3e853Sbellard             tb_end = ((tb->pc + tb->size) & ~TARGET_PAGE_MASK);
5839fa3e853Sbellard         }
5849fa3e853Sbellard         set_bits(p->code_bitmap, tb_start, tb_end - tb_start);
5859fa3e853Sbellard         tb = tb->page_next[n];
5869fa3e853Sbellard     }
5879fa3e853Sbellard }
5889fa3e853Sbellard 
589d720b93dSbellard #ifdef TARGET_HAS_PRECISE_SMC
590d720b93dSbellard 
/* Generate a new TB for 'pc' with the given cs_base/flags/cflags and
 * link it by physical address (precise SMC handling only, see the
 * surrounding TARGET_HAS_PRECISE_SMC guard).  If TB allocation fails,
 * the whole translation cache is flushed and the allocation retried,
 * which cannot fail again.
 */
591d720b93dSbellard static void tb_gen_code(CPUState *env,
592d720b93dSbellard                         target_ulong pc, target_ulong cs_base, int flags,
593d720b93dSbellard                         int cflags)
594d720b93dSbellard {
595d720b93dSbellard     TranslationBlock *tb;
596d720b93dSbellard     uint8_t *tc_ptr;
597d720b93dSbellard     target_ulong phys_pc, phys_page2, virt_page2;
598d720b93dSbellard     int code_gen_size;
599d720b93dSbellard 
600c27004ecSbellard     phys_pc = get_phys_addr_code(env, pc);
601c27004ecSbellard     tb = tb_alloc(pc);
602d720b93dSbellard     if (!tb) {
603d720b93dSbellard         /* flush must be done */
604d720b93dSbellard         tb_flush(env);
605d720b93dSbellard         /* cannot fail at this point */
606c27004ecSbellard         tb = tb_alloc(pc);
607d720b93dSbellard     }
608d720b93dSbellard     tc_ptr = code_gen_ptr;
609d720b93dSbellard     tb->tc_ptr = tc_ptr;
610d720b93dSbellard     tb->cs_base = cs_base;
611d720b93dSbellard     tb->flags = flags;
612d720b93dSbellard     tb->cflags = cflags;
613d720b93dSbellard     cpu_gen_code(env, tb, CODE_GEN_MAX_SIZE, &code_gen_size);
    /* advance the code pointer, rounded up to CODE_GEN_ALIGN */
614d720b93dSbellard     code_gen_ptr = (void *)(((unsigned long)code_gen_ptr + code_gen_size + CODE_GEN_ALIGN - 1) & ~(CODE_GEN_ALIGN - 1));
615d720b93dSbellard 
616d720b93dSbellard     /* check next page if needed */
617c27004ecSbellard     virt_page2 = (pc + tb->size - 1) & TARGET_PAGE_MASK;
    /* -1 == TB fits in a single page */
618d720b93dSbellard     phys_page2 = -1;
619c27004ecSbellard     if ((pc & TARGET_PAGE_MASK) != virt_page2) {
620d720b93dSbellard         phys_page2 = get_phys_addr_code(env, virt_page2);
621d720b93dSbellard     }
622d720b93dSbellard     tb_link_phys(tb, phys_pc, phys_page2);
623d720b93dSbellard }
624d720b93dSbellard #endif
625d720b93dSbellard 
6269fa3e853Sbellard /* invalidate all TBs which intersect with the target physical page
6279fa3e853Sbellard    starting in range [start;end[. NOTE: start and end must refer to
628d720b93dSbellard    the same physical page. 'is_cpu_write_access' should be true if called
629d720b93dSbellard    from a real cpu write access: the virtual CPU will exit the current
630d720b93dSbellard    TB if code is modified inside this TB. */
/* Invalidate every TB intersecting the guest-physical range [start, end[.
 * 'is_cpu_write_access' is non-zero when called from a CPU store into code
 * (self-modifying code); in that case env->mem_write_pc identifies the
 * writing instruction and, under TARGET_HAS_PRECISE_SMC, the currently
 * executing TB may have to be aborted and regenerated as a single-insn TB. */
void tb_invalidate_phys_page_range(target_ulong start, target_ulong end,
                                   int is_cpu_write_access)
{
    int n, current_tb_modified, current_tb_not_found, current_flags;
    CPUState *env = cpu_single_env;
    PageDesc *p;
    TranslationBlock *tb, *tb_next, *current_tb, *saved_tb;
    target_ulong tb_start, tb_end;
    target_ulong current_pc, current_cs_base;

    p = page_find(start >> TARGET_PAGE_BITS);
    if (!p)
        return;
    /* after enough CPU writes to this page, switch to a per-byte code
       bitmap so future writes can be filtered cheaply (see
       tb_invalidate_phys_page_fast) */
    if (!p->code_bitmap &&
        ++p->code_write_count >= SMC_BITMAP_USE_THRESHOLD &&
        is_cpu_write_access) {
        /* build code bitmap */
        build_page_bitmap(p);
    }

    /* we remove all the TBs in the range [start, end[ */
    /* XXX: see if in some cases it could be faster to invalidate all the code */
    current_tb_not_found = is_cpu_write_access;
    current_tb_modified = 0;
    current_tb = NULL; /* avoid warning */
    current_pc = 0; /* avoid warning */
    current_cs_base = 0; /* avoid warning */
    current_flags = 0; /* avoid warning */
    tb = p->first_tb;
    while (tb != NULL) {
        /* low 2 bits of the list pointer encode which of the TB's (up to
           two) pages this list entry belongs to */
        n = (long)tb & 3;
        tb = (TranslationBlock *)((long)tb & ~3);
        tb_next = tb->page_next[n];
        /* NOTE: this is subtle as a TB may span two physical pages */
        if (n == 0) {
            /* NOTE: tb_end may be after the end of the page, but
               it is not a problem */
            tb_start = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
            tb_end = tb_start + tb->size;
        } else {
            tb_start = tb->page_addr[1];
            tb_end = tb_start + ((tb->pc + tb->size) & ~TARGET_PAGE_MASK);
        }
        if (!(tb_end <= start || tb_start >= end)) {
#ifdef TARGET_HAS_PRECISE_SMC
            /* lazily look up the TB we are currently executing, only once */
            if (current_tb_not_found) {
                current_tb_not_found = 0;
                current_tb = NULL;
                if (env->mem_write_pc) {
                    /* now we have a real cpu fault */
                    current_tb = tb_find_pc(env->mem_write_pc);
                }
            }
            if (current_tb == tb &&
                !(current_tb->cflags & CF_SINGLE_INSN)) {
                /* If we are modifying the current TB, we must stop
                its execution. We could be more precise by checking
                that the modification is after the current PC, but it
                would require a specialized function to partially
                restore the CPU state */

                current_tb_modified = 1;
                cpu_restore_state(current_tb, env,
                                  env->mem_write_pc, NULL);
#if defined(TARGET_I386)
                /* capture the state needed to regenerate code at the
                   current guest PC after invalidation */
                current_flags = env->hflags;
                current_flags |= (env->eflags & (IOPL_MASK | TF_MASK | VM_MASK));
                current_cs_base = (target_ulong)env->segs[R_CS].base;
                current_pc = current_cs_base + env->eip;
#else
#error unsupported CPU
#endif
            }
#endif /* TARGET_HAS_PRECISE_SMC */
            /* we need to do that to handle the case where a signal
               occurs while doing tb_phys_invalidate() */
            saved_tb = NULL;
            if (env) {
                saved_tb = env->current_tb;
                env->current_tb = NULL;
            }
            tb_phys_invalidate(tb, -1);
            if (env) {
                env->current_tb = saved_tb;
                /* re-deliver any interrupt that arrived while current_tb
                   was cleared */
                if (env->interrupt_request && env->current_tb)
                    cpu_interrupt(env, env->interrupt_request);
            }
        }
        tb = tb_next;
    }
#if !defined(CONFIG_USER_ONLY)
    /* if no code remaining, no need to continue to use slow writes */
    if (!p->first_tb) {
        invalidate_page_bitmap(p);
        if (is_cpu_write_access) {
            tlb_unprotect_code_phys(env, start, env->mem_write_vaddr);
        }
    }
#endif
#ifdef TARGET_HAS_PRECISE_SMC
    if (current_tb_modified) {
        /* we generate a block containing just the instruction
           modifying the memory. It will ensure that it cannot modify
           itself */
        env->current_tb = NULL;
        tb_gen_code(env, current_pc, current_cs_base, current_flags,
                    CF_SINGLE_INSN);
        /* does not return: restarts execution at the regenerated TB */
        cpu_resume_from_signal(env, NULL);
    }
#endif
}
7429fa3e853Sbellard 
7439fa3e853Sbellard /* len must be <= 8 and start must be a multiple of len */
744d720b93dSbellard static inline void tb_invalidate_phys_page_fast(target_ulong start, int len)
7459fa3e853Sbellard {
7469fa3e853Sbellard     PageDesc *p;
7479fa3e853Sbellard     int offset, b;
74859817ccbSbellard #if 0
749a4193c8aSbellard     if (1) {
750a4193c8aSbellard         if (loglevel) {
751a4193c8aSbellard             fprintf(logfile, "modifying code at 0x%x size=%d EIP=%x PC=%08x\n",
752a4193c8aSbellard                    cpu_single_env->mem_write_vaddr, len,
753a4193c8aSbellard                    cpu_single_env->eip,
754a4193c8aSbellard                    cpu_single_env->eip + (long)cpu_single_env->segs[R_CS].base);
755a4193c8aSbellard         }
75659817ccbSbellard     }
75759817ccbSbellard #endif
7589fa3e853Sbellard     p = page_find(start >> TARGET_PAGE_BITS);
7599fa3e853Sbellard     if (!p)
7609fa3e853Sbellard         return;
7619fa3e853Sbellard     if (p->code_bitmap) {
7629fa3e853Sbellard         offset = start & ~TARGET_PAGE_MASK;
7639fa3e853Sbellard         b = p->code_bitmap[offset >> 3] >> (offset & 7);
7649fa3e853Sbellard         if (b & ((1 << len) - 1))
7659fa3e853Sbellard             goto do_invalidate;
7669fa3e853Sbellard     } else {
7679fa3e853Sbellard     do_invalidate:
768d720b93dSbellard         tb_invalidate_phys_page_range(start, start + len, 1);
7699fa3e853Sbellard     }
7709fa3e853Sbellard }
7719fa3e853Sbellard 
#if !defined(CONFIG_SOFTMMU)
/* Invalidate all TBs on one guest page after a write fault (user-mode
 * emulation path).  'pc' is the host PC of the faulting write (0 if
 * unknown) and 'puc' the signal context, both forwarded to
 * cpu_restore_state() when the currently executing TB must be aborted. */
static void tb_invalidate_phys_page(target_ulong addr,
                                    unsigned long pc, void *puc)
{
    int n, current_flags, current_tb_modified;
    target_ulong current_pc, current_cs_base;
    PageDesc *p;
    TranslationBlock *tb, *current_tb;
#ifdef TARGET_HAS_PRECISE_SMC
    CPUState *env = cpu_single_env;
#endif

    addr &= TARGET_PAGE_MASK;
    p = page_find(addr >> TARGET_PAGE_BITS);
    if (!p)
        return;
    tb = p->first_tb;
    current_tb_modified = 0;
    current_tb = NULL;
    current_pc = 0; /* avoid warning */
    current_cs_base = 0; /* avoid warning */
    current_flags = 0; /* avoid warning */
#ifdef TARGET_HAS_PRECISE_SMC
    if (tb && pc != 0) {
        /* find the TB we were executing when the write faulted */
        current_tb = tb_find_pc(pc);
    }
#endif
    while (tb != NULL) {
        /* low 2 bits of the list pointer select which page list (0 or 1)
           of the TB this entry belongs to */
        n = (long)tb & 3;
        tb = (TranslationBlock *)((long)tb & ~3);
#ifdef TARGET_HAS_PRECISE_SMC
        if (current_tb == tb &&
            !(current_tb->cflags & CF_SINGLE_INSN)) {
                /* If we are modifying the current TB, we must stop
                   its execution. We could be more precise by checking
                   that the modification is after the current PC, but it
                   would require a specialized function to partially
                   restore the CPU state */

            current_tb_modified = 1;
            cpu_restore_state(current_tb, env, pc, puc);
#if defined(TARGET_I386)
            /* capture state needed to regenerate a single-insn TB */
            current_flags = env->hflags;
            current_flags |= (env->eflags & (IOPL_MASK | TF_MASK | VM_MASK));
            current_cs_base = (target_ulong)env->segs[R_CS].base;
            current_pc = current_cs_base + env->eip;
#else
#error unsupported CPU
#endif
        }
#endif /* TARGET_HAS_PRECISE_SMC */
        tb_phys_invalidate(tb, addr);
        tb = tb->page_next[n];
    }
    p->first_tb = NULL;
#ifdef TARGET_HAS_PRECISE_SMC
    if (current_tb_modified) {
        /* we generate a block containing just the instruction
           modifying the memory. It will ensure that it cannot modify
           itself */
        env->current_tb = NULL;
        tb_gen_code(env, current_pc, current_cs_base, current_flags,
                    CF_SINGLE_INSN);
        /* does not return: restarts execution at the regenerated TB */
        cpu_resume_from_signal(env, puc);
    }
#endif
}
#endif
840fd6ce8f6Sbellard 
/* add the tb in the target page and protect it if necessary */
/* Record page_addr as page n (0 or 1) of 'tb', link the TB into the
 * page's TB list (tagging the pointer's low bits with n), and
 * write-protect the page so later guest writes are detected. */
static inline void tb_alloc_page(TranslationBlock *tb,
                                 unsigned int n, target_ulong page_addr)
{
    PageDesc *p;
    TranslationBlock *last_first_tb;

    tb->page_addr[n] = page_addr;
    p = page_find_alloc(page_addr >> TARGET_PAGE_BITS);
    /* prepend to the page's TB list; low bits of the stored pointer
       encode which of the TB's pages this entry is for */
    tb->page_next[n] = p->first_tb;
    last_first_tb = p->first_tb;
    p->first_tb = (TranslationBlock *)((long)tb | n);
    invalidate_page_bitmap(p);

#if defined(TARGET_HAS_SMC) || 1

#if defined(CONFIG_USER_ONLY)
    if (p->flags & PAGE_WRITE) {
        target_ulong addr;
        PageDesc *p2;
        int prot;

        /* force the host page as non writable (writes will have a
           page fault + mprotect overhead) */
        page_addr &= qemu_host_page_mask;
        prot = 0;
        /* a host page may cover several target pages: accumulate their
           flags and clear PAGE_WRITE on each of them */
        for(addr = page_addr; addr < page_addr + qemu_host_page_size;
            addr += TARGET_PAGE_SIZE) {

            p2 = page_find (addr >> TARGET_PAGE_BITS);
            if (!p2)
                continue;
            prot |= p2->flags;
            p2->flags &= ~PAGE_WRITE;
            page_get_flags(addr);
          }
        mprotect(g2h(page_addr), qemu_host_page_size,
                 (prot & PAGE_BITS) & ~PAGE_WRITE);
#ifdef DEBUG_TB_INVALIDATE
        printf("protecting code page: 0x%08lx\n",
               page_addr);
#endif
    }
#else
    /* if some code is already present, then the pages are already
       protected. So we handle the case where only the first TB is
       allocated in a physical page */
    if (!last_first_tb) {
        tlb_protect_code(page_addr);
    }
#endif

#endif /* TARGET_HAS_SMC */
}
895fd6ce8f6Sbellard 
896fd6ce8f6Sbellard /* Allocate a new translation block. Flush the translation buffer if
897fd6ce8f6Sbellard    too many translation blocks or too much generated code. */
898c27004ecSbellard TranslationBlock *tb_alloc(target_ulong pc)
899fd6ce8f6Sbellard {
900fd6ce8f6Sbellard     TranslationBlock *tb;
901fd6ce8f6Sbellard 
902fd6ce8f6Sbellard     if (nb_tbs >= CODE_GEN_MAX_BLOCKS ||
903fd6ce8f6Sbellard         (code_gen_ptr - code_gen_buffer) >= CODE_GEN_BUFFER_MAX_SIZE)
904d4e8164fSbellard         return NULL;
905fd6ce8f6Sbellard     tb = &tbs[nb_tbs++];
906fd6ce8f6Sbellard     tb->pc = pc;
907b448f2f3Sbellard     tb->cflags = 0;
908d4e8164fSbellard     return tb;
909d4e8164fSbellard }
910d4e8164fSbellard 
/* add a new TB and link it to the physical page tables. phys_page2 is
   (-1) to indicate that only one page contains the TB. */
void tb_link_phys(TranslationBlock *tb,
                  target_ulong phys_pc, target_ulong phys_page2)
{
    unsigned int h;
    TranslationBlock **ptb;

    /* add in the physical hash table */
    h = tb_phys_hash_func(phys_pc);
    ptb = &tb_phys_hash[h];
    tb->phys_hash_next = *ptb;
    *ptb = tb;

    /* add in the page list */
    tb_alloc_page(tb, 0, phys_pc & TARGET_PAGE_MASK);
    if (phys_page2 != -1)
        tb_alloc_page(tb, 1, phys_page2);
    else
        tb->page_addr[1] = -1;

    /* tag 2 in jmp_first marks the head/end of the circular jump list */
    tb->jmp_first = (TranslationBlock *)((long)tb | 2);
    tb->jmp_next[0] = NULL;
    tb->jmp_next[1] = NULL;
#ifdef USE_CODE_COPY
    tb->cflags &= ~CF_FP_USED;
    if (tb->cflags & CF_TB_FP_USED)
        tb->cflags |= CF_FP_USED;
#endif

    /* init original jump addresses */
    /* 0xffff in tb_next_offset means "no direct jump patched here" */
    if (tb->tb_next_offset[0] != 0xffff)
        tb_reset_jump(tb, 0);
    if (tb->tb_next_offset[1] != 0xffff)
        tb_reset_jump(tb, 1);

#ifdef DEBUG_TB_CHECK
    tb_page_check();
#endif
}
951fd6ce8f6Sbellard 
952a513fe19Sbellard /* find the TB 'tb' such that tb[0].tc_ptr <= tc_ptr <
953a513fe19Sbellard    tb[1].tc_ptr. Return NULL if not found */
954a513fe19Sbellard TranslationBlock *tb_find_pc(unsigned long tc_ptr)
955a513fe19Sbellard {
956a513fe19Sbellard     int m_min, m_max, m;
957a513fe19Sbellard     unsigned long v;
958a513fe19Sbellard     TranslationBlock *tb;
959a513fe19Sbellard 
960a513fe19Sbellard     if (nb_tbs <= 0)
961a513fe19Sbellard         return NULL;
962a513fe19Sbellard     if (tc_ptr < (unsigned long)code_gen_buffer ||
963a513fe19Sbellard         tc_ptr >= (unsigned long)code_gen_ptr)
964a513fe19Sbellard         return NULL;
965a513fe19Sbellard     /* binary search (cf Knuth) */
966a513fe19Sbellard     m_min = 0;
967a513fe19Sbellard     m_max = nb_tbs - 1;
968a513fe19Sbellard     while (m_min <= m_max) {
969a513fe19Sbellard         m = (m_min + m_max) >> 1;
970a513fe19Sbellard         tb = &tbs[m];
971a513fe19Sbellard         v = (unsigned long)tb->tc_ptr;
972a513fe19Sbellard         if (v == tc_ptr)
973a513fe19Sbellard             return tb;
974a513fe19Sbellard         else if (tc_ptr < v) {
975a513fe19Sbellard             m_max = m - 1;
976a513fe19Sbellard         } else {
977a513fe19Sbellard             m_min = m + 1;
978a513fe19Sbellard         }
979a513fe19Sbellard     }
980a513fe19Sbellard     return &tbs[m_max];
981a513fe19Sbellard }
9827501267eSbellard 
static void tb_reset_jump_recursive(TranslationBlock *tb);

/* Break the direct jump n (0 or 1) out of 'tb': unlink tb from the
 * target TB's circular jmp_first list, patch the generated code back to
 * its non-chained form, and recurse into the jump target.  List
 * pointers carry a 2-bit tag: 0/1 = which jump slot of the linked TB,
 * 2 = the list head stored in jmp_first. */
static inline void tb_reset_jump_recursive2(TranslationBlock *tb, int n)
{
    TranslationBlock *tb1, *tb_next, **ptb;
    unsigned int n1;

    tb1 = tb->jmp_next[n];
    if (tb1 != NULL) {
        /* find head of list */
        for(;;) {
            n1 = (long)tb1 & 3;
            tb1 = (TranslationBlock *)((long)tb1 & ~3);
            if (n1 == 2)
                break;
            tb1 = tb1->jmp_next[n1];
        }
        /* we are now sure now that tb jumps to tb1 */
        tb_next = tb1;

        /* remove tb from the jmp_first list */
        ptb = &tb_next->jmp_first;
        for(;;) {
            tb1 = *ptb;
            n1 = (long)tb1 & 3;
            tb1 = (TranslationBlock *)((long)tb1 & ~3);
            if (n1 == n && tb1 == tb)
                break;
            ptb = &tb1->jmp_next[n1];
        }
        *ptb = tb->jmp_next[n];
        tb->jmp_next[n] = NULL;

        /* suppress the jump to next tb in generated code */
        tb_reset_jump(tb, n);

        /* suppress jumps in the tb on which we could have jumped */
        tb_reset_jump_recursive(tb_next);
    }
}
1023ea041c0eSbellard 
1024ea041c0eSbellard static void tb_reset_jump_recursive(TranslationBlock *tb)
1025ea041c0eSbellard {
1026ea041c0eSbellard     tb_reset_jump_recursive2(tb, 0);
1027ea041c0eSbellard     tb_reset_jump_recursive2(tb, 1);
1028ea041c0eSbellard }
1029ea041c0eSbellard 
#if defined(TARGET_HAS_ICE)
/* Drop any cached translation covering guest pc so that the next
   execution retranslates it (and hits the breakpoint). */
static void breakpoint_invalidate(CPUState *env, target_ulong pc)
{
    target_ulong phys_addr, phys_offset;
    ram_addr_t ram_addr;
    PhysPageDesc *desc;

    /* translate the virtual pc to a physical page, then to a ram offset */
    phys_addr = cpu_get_phys_page_debug(env, pc);
    desc = phys_page_find(phys_addr >> TARGET_PAGE_BITS);
    phys_offset = desc ? desc->phys_offset : IO_MEM_UNASSIGNED;
    ram_addr = (phys_offset & TARGET_PAGE_MASK) | (pc & ~TARGET_PAGE_MASK);
    tb_invalidate_phys_page_range(ram_addr, ram_addr + 1, 0);
}
#endif
1048d720b93dSbellard 
10496658ffb8Spbrook /* Add a watchpoint.  */
10506658ffb8Spbrook int  cpu_watchpoint_insert(CPUState *env, target_ulong addr)
10516658ffb8Spbrook {
10526658ffb8Spbrook     int i;
10536658ffb8Spbrook 
10546658ffb8Spbrook     for (i = 0; i < env->nb_watchpoints; i++) {
10556658ffb8Spbrook         if (addr == env->watchpoint[i].vaddr)
10566658ffb8Spbrook             return 0;
10576658ffb8Spbrook     }
10586658ffb8Spbrook     if (env->nb_watchpoints >= MAX_WATCHPOINTS)
10596658ffb8Spbrook         return -1;
10606658ffb8Spbrook 
10616658ffb8Spbrook     i = env->nb_watchpoints++;
10626658ffb8Spbrook     env->watchpoint[i].vaddr = addr;
10636658ffb8Spbrook     tlb_flush_page(env, addr);
10646658ffb8Spbrook     /* FIXME: This flush is needed because of the hack to make memory ops
10656658ffb8Spbrook        terminate the TB.  It can be removed once the proper IO trap and
10666658ffb8Spbrook        re-execute bits are in.  */
10676658ffb8Spbrook     tb_flush(env);
10686658ffb8Spbrook     return i;
10696658ffb8Spbrook }
10706658ffb8Spbrook 
10716658ffb8Spbrook /* Remove a watchpoint.  */
10726658ffb8Spbrook int cpu_watchpoint_remove(CPUState *env, target_ulong addr)
10736658ffb8Spbrook {
10746658ffb8Spbrook     int i;
10756658ffb8Spbrook 
10766658ffb8Spbrook     for (i = 0; i < env->nb_watchpoints; i++) {
10776658ffb8Spbrook         if (addr == env->watchpoint[i].vaddr) {
10786658ffb8Spbrook             env->nb_watchpoints--;
10796658ffb8Spbrook             env->watchpoint[i] = env->watchpoint[env->nb_watchpoints];
10806658ffb8Spbrook             tlb_flush_page(env, addr);
10816658ffb8Spbrook             return 0;
10826658ffb8Spbrook         }
10836658ffb8Spbrook     }
10846658ffb8Spbrook     return -1;
10856658ffb8Spbrook }
10866658ffb8Spbrook 
1087c33a346eSbellard /* add a breakpoint. EXCP_DEBUG is returned by the CPU loop if a
1088c33a346eSbellard    breakpoint is reached */
10892e12669aSbellard int cpu_breakpoint_insert(CPUState *env, target_ulong pc)
10904c3a88a2Sbellard {
10911fddef4bSbellard #if defined(TARGET_HAS_ICE)
10924c3a88a2Sbellard     int i;
10934c3a88a2Sbellard 
10944c3a88a2Sbellard     for(i = 0; i < env->nb_breakpoints; i++) {
10954c3a88a2Sbellard         if (env->breakpoints[i] == pc)
10964c3a88a2Sbellard             return 0;
10974c3a88a2Sbellard     }
10984c3a88a2Sbellard 
10994c3a88a2Sbellard     if (env->nb_breakpoints >= MAX_BREAKPOINTS)
11004c3a88a2Sbellard         return -1;
11014c3a88a2Sbellard     env->breakpoints[env->nb_breakpoints++] = pc;
1102d720b93dSbellard 
1103d720b93dSbellard     breakpoint_invalidate(env, pc);
11044c3a88a2Sbellard     return 0;
11054c3a88a2Sbellard #else
11064c3a88a2Sbellard     return -1;
11074c3a88a2Sbellard #endif
11084c3a88a2Sbellard }
11094c3a88a2Sbellard 
11104c3a88a2Sbellard /* remove a breakpoint */
11112e12669aSbellard int cpu_breakpoint_remove(CPUState *env, target_ulong pc)
11124c3a88a2Sbellard {
11131fddef4bSbellard #if defined(TARGET_HAS_ICE)
11144c3a88a2Sbellard     int i;
11154c3a88a2Sbellard     for(i = 0; i < env->nb_breakpoints; i++) {
11164c3a88a2Sbellard         if (env->breakpoints[i] == pc)
11174c3a88a2Sbellard             goto found;
11184c3a88a2Sbellard     }
11194c3a88a2Sbellard     return -1;
11204c3a88a2Sbellard  found:
11214c3a88a2Sbellard     env->nb_breakpoints--;
11221fddef4bSbellard     if (i < env->nb_breakpoints)
11231fddef4bSbellard       env->breakpoints[i] = env->breakpoints[env->nb_breakpoints];
1124d720b93dSbellard 
1125d720b93dSbellard     breakpoint_invalidate(env, pc);
11264c3a88a2Sbellard     return 0;
11274c3a88a2Sbellard #else
11284c3a88a2Sbellard     return -1;
11294c3a88a2Sbellard #endif
11304c3a88a2Sbellard }
11314c3a88a2Sbellard 
1132c33a346eSbellard /* enable or disable single step mode. EXCP_DEBUG is returned by the
1133c33a346eSbellard    CPU loop after each instruction */
1134c33a346eSbellard void cpu_single_step(CPUState *env, int enabled)
1135c33a346eSbellard {
11361fddef4bSbellard #if defined(TARGET_HAS_ICE)
1137c33a346eSbellard     if (env->singlestep_enabled != enabled) {
1138c33a346eSbellard         env->singlestep_enabled = enabled;
1139c33a346eSbellard         /* must flush all the translated code to avoid inconsistancies */
11409fa3e853Sbellard         /* XXX: only flush what is necessary */
11410124311eSbellard         tb_flush(env);
1142c33a346eSbellard     }
1143c33a346eSbellard #endif
1144c33a346eSbellard }
1145c33a346eSbellard 
/* enable or disable low levels log */
/* Sets the global log mask and lazily opens the log file on first use.
 * NOTE: exits the process (_exit(1)) if the log file cannot be opened. */
void cpu_set_log(int log_flags)
{
    loglevel = log_flags;
    if (loglevel && !logfile) {
        logfile = fopen(logfilename, "w");
        if (!logfile) {
            perror(logfilename);
            _exit(1);
        }
#if !defined(CONFIG_SOFTMMU)
        /* must avoid mmap() usage of glibc by setting a buffer "by hand" */
        {
            static uint8_t logfile_buf[4096];
            setvbuf(logfile, logfile_buf, _IOLBF, sizeof(logfile_buf));
        }
#else
        /* line-buffered so log output appears promptly */
        setvbuf(logfile, NULL, _IOLBF, 0);
#endif
    }
}
116734865134Sbellard 
/* Replace the log file name used by cpu_set_log().
 * NOTE(review): the previous logfilename value is not freed; it may point
 * to a static default string elsewhere, so freeing here would be unsafe —
 * confirm before changing.  strdup() failure is not checked. */
void cpu_set_log_filename(const char *filename)
{
    logfilename = strdup(filename);
}
1172c33a346eSbellard 
/* mask must never be zero, except for A20 change call */
/* Raise the interrupt bits in 'mask' and, if the CPU is executing
 * translated code, break the chained jumps of the current TB so the
 * main loop regains control. */
void cpu_interrupt(CPUState *env, int mask)
{
    TranslationBlock *tb;
    static int interrupt_lock;

    env->interrupt_request |= mask;
    /* if the cpu is currently executing code, we must unlink it and
       all the potentially executing TB */
    tb = env->current_tb;
    /* testandset() guards against concurrent unlinking; when the lock is
       already held the unlink is skipped — the mask is set regardless */
    if (tb && !testandset(&interrupt_lock)) {
        env->current_tb = NULL;
        tb_reset_jump_recursive(tb);
        interrupt_lock = 0;
    }
}
1189ea041c0eSbellard 
/* Clear the interrupt request bits in 'mask'. */
void cpu_reset_interrupt(CPUState *env, int mask)
{
    env->interrupt_request &= ~mask;
}
1194b54ad049Sbellard 
/* Table of recognized log item names, matched by cpu_str_to_log_mask().
 * Terminated by an entry with a zero mask. */
CPULogItem cpu_log_items[] = {
    { CPU_LOG_TB_OUT_ASM, "out_asm",
      "show generated host assembly code for each compiled TB" },
    { CPU_LOG_TB_IN_ASM, "in_asm",
      "show target assembly code for each compiled TB" },
    { CPU_LOG_TB_OP, "op",
      "show micro ops for each compiled TB (only usable if 'in_asm' used)" },
#ifdef TARGET_I386
    { CPU_LOG_TB_OP_OPT, "op_opt",
      "show micro ops after optimization for each compiled TB" },
#endif
    { CPU_LOG_INT, "int",
      "show interrupts/exceptions in short format" },
    { CPU_LOG_EXEC, "exec",
      "show trace before each executed TB (lots of logs)" },
    { CPU_LOG_TB_CPU, "cpu",
      "show CPU state before bloc translation" },
#ifdef TARGET_I386
    { CPU_LOG_PCALL, "pcall",
      "show protected mode far calls/returns/exceptions" },
#endif
#ifdef DEBUG_IOPORT
    { CPU_LOG_IOPORT, "ioport",
      "show all i/o ports accesses" },
#endif
    { 0, NULL, NULL },
};
1222f193c797Sbellard 
/* Compare the first n bytes of s1 against the NUL-terminated string s2.
 * Returns non-zero iff s2 has length exactly n and the bytes match.
 * Used by cpu_str_to_log_mask() to match comma-separated item names,
 * where n is the token length (p1 - p). */
static int cmp1(const char *s1, int n, const char *s2)
{
    /* reject negative lengths explicitly instead of relying on the
       implicit signed->unsigned conversion in the size_t comparison */
    if (n < 0 || strlen(s2) != (size_t)n)
        return 0;
    return memcmp(s1, s2, n) == 0;
}
1229f193c797Sbellard 
1230f193c797Sbellard /* takes a comma separated list of log masks. Return 0 if error. */
1231f193c797Sbellard int cpu_str_to_log_mask(const char *str)
1232f193c797Sbellard {
1233f193c797Sbellard     CPULogItem *item;
1234f193c797Sbellard     int mask;
1235f193c797Sbellard     const char *p, *p1;
1236f193c797Sbellard 
1237f193c797Sbellard     p = str;
1238f193c797Sbellard     mask = 0;
1239f193c797Sbellard     for(;;) {
1240f193c797Sbellard         p1 = strchr(p, ',');
1241f193c797Sbellard         if (!p1)
1242f193c797Sbellard             p1 = p + strlen(p);
12438e3a9fd2Sbellard 	if(cmp1(p,p1-p,"all")) {
12448e3a9fd2Sbellard 		for(item = cpu_log_items; item->mask != 0; item++) {
12458e3a9fd2Sbellard 			mask |= item->mask;
12468e3a9fd2Sbellard 		}
12478e3a9fd2Sbellard 	} else {
1248f193c797Sbellard         for(item = cpu_log_items; item->mask != 0; item++) {
1249f193c797Sbellard             if (cmp1(p, p1 - p, item->name))
1250f193c797Sbellard                 goto found;
1251f193c797Sbellard         }
1252f193c797Sbellard         return 0;
12538e3a9fd2Sbellard 	}
1254f193c797Sbellard     found:
1255f193c797Sbellard         mask |= item->mask;
1256f193c797Sbellard         if (*p1 != ',')
1257f193c797Sbellard             break;
1258f193c797Sbellard         p = p1 + 1;
1259f193c797Sbellard     }
1260f193c797Sbellard     return mask;
1261f193c797Sbellard }
1262ea041c0eSbellard 
/* Print a fatal error message (printf-style) plus a CPU state dump to
 * stderr, then abort().  Never returns. */
void cpu_abort(CPUState *env, const char *fmt, ...)
{
    va_list ap;

    va_start(ap, fmt);
    fprintf(stderr, "qemu: fatal: ");
    vfprintf(stderr, fmt, ap);
    fprintf(stderr, "\n");
#ifdef TARGET_I386
    /* include FPU and condition-code details on x86 */
    cpu_dump_state(env, stderr, fprintf, X86_DUMP_FPU | X86_DUMP_CCOP);
#else
    cpu_dump_state(env, stderr, fprintf, 0);
#endif
    va_end(ap);
    abort();
}
12797501267eSbellard 
1280c5be9f08Sths CPUState *cpu_copy(CPUState *env)
1281c5be9f08Sths {
1282c5be9f08Sths     CPUState *new_env = cpu_init();
1283c5be9f08Sths     /* preserve chaining and index */
1284c5be9f08Sths     CPUState *next_cpu = new_env->next_cpu;
1285c5be9f08Sths     int cpu_index = new_env->cpu_index;
1286c5be9f08Sths     memcpy(new_env, env, sizeof(CPUState));
1287c5be9f08Sths     new_env->next_cpu = next_cpu;
1288c5be9f08Sths     new_env->cpu_index = cpu_index;
1289c5be9f08Sths     return new_env;
1290c5be9f08Sths }
1291c5be9f08Sths 
12920124311eSbellard #if !defined(CONFIG_USER_ONLY)
12930124311eSbellard 
/* NOTE: if flush_global is true, also flush global entries (not
   implemented yet) */
/* Invalidate the whole software TLB (all MMU modes) and the TB jump
 * cache of 'env'. */
void tlb_flush(CPUState *env, int flush_global)
{
    int i;

#if defined(DEBUG_TLB)
    printf("tlb_flush:\n");
#endif
    /* must reset current TB so that interrupts cannot modify the
       links while we are modifying them */
    env->current_tb = NULL;

    /* -1 marks a TLB entry as invalid; clear all three access kinds
       (read/write/code) for every entry of every MMU mode */
    for(i = 0; i < CPU_TLB_SIZE; i++) {
        env->tlb_table[0][i].addr_read = -1;
        env->tlb_table[0][i].addr_write = -1;
        env->tlb_table[0][i].addr_code = -1;
        env->tlb_table[1][i].addr_read = -1;
        env->tlb_table[1][i].addr_write = -1;
        env->tlb_table[1][i].addr_code = -1;
#if (NB_MMU_MODES >= 3)
        env->tlb_table[2][i].addr_read = -1;
        env->tlb_table[2][i].addr_write = -1;
        env->tlb_table[2][i].addr_code = -1;
#if (NB_MMU_MODES == 4)
        env->tlb_table[3][i].addr_read = -1;
        env->tlb_table[3][i].addr_write = -1;
        env->tlb_table[3][i].addr_code = -1;
#endif
#endif
    }

    /* drop the per-CPU cached TB lookup results as well */
    memset (env->tb_jmp_cache, 0, TB_JMP_CACHE_SIZE * sizeof (void *));

#if !defined(CONFIG_SOFTMMU)
    munmap((void *)MMAP_AREA_START, MMAP_AREA_END - MMAP_AREA_START);
#endif
#ifdef USE_KQEMU
    /* keep the kernel accelerator's TLB in sync */
    if (env->kqemu_enabled) {
        kqemu_flush(env, flush_global);
    }
#endif
    tlb_flush_count++;
}
133833417e70Sbellard 
1339274da6b2Sbellard static inline void tlb_flush_entry(CPUTLBEntry *tlb_entry, target_ulong addr)
134061382a50Sbellard {
134184b7b8e7Sbellard     if (addr == (tlb_entry->addr_read &
134284b7b8e7Sbellard                  (TARGET_PAGE_MASK | TLB_INVALID_MASK)) ||
134384b7b8e7Sbellard         addr == (tlb_entry->addr_write &
134484b7b8e7Sbellard                  (TARGET_PAGE_MASK | TLB_INVALID_MASK)) ||
134584b7b8e7Sbellard         addr == (tlb_entry->addr_code &
134684b7b8e7Sbellard                  (TARGET_PAGE_MASK | TLB_INVALID_MASK))) {
134784b7b8e7Sbellard         tlb_entry->addr_read = -1;
134884b7b8e7Sbellard         tlb_entry->addr_write = -1;
134984b7b8e7Sbellard         tlb_entry->addr_code = -1;
135084b7b8e7Sbellard     }
135161382a50Sbellard }
135261382a50Sbellard 
13532e12669aSbellard void tlb_flush_page(CPUState *env, target_ulong addr)
135433417e70Sbellard {
13558a40a180Sbellard     int i;
13569fa3e853Sbellard     TranslationBlock *tb;
13570124311eSbellard 
13589fa3e853Sbellard #if defined(DEBUG_TLB)
1359108c49b8Sbellard     printf("tlb_flush_page: " TARGET_FMT_lx "\n", addr);
13609fa3e853Sbellard #endif
13610124311eSbellard     /* must reset current TB so that interrupts cannot modify the
13620124311eSbellard        links while we are modifying them */
13630124311eSbellard     env->current_tb = NULL;
136433417e70Sbellard 
136561382a50Sbellard     addr &= TARGET_PAGE_MASK;
136633417e70Sbellard     i = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
136784b7b8e7Sbellard     tlb_flush_entry(&env->tlb_table[0][i], addr);
136884b7b8e7Sbellard     tlb_flush_entry(&env->tlb_table[1][i], addr);
13696fa4cea9Sj_mayer #if (NB_MMU_MODES >= 3)
13706fa4cea9Sj_mayer     tlb_flush_entry(&env->tlb_table[2][i], addr);
13716fa4cea9Sj_mayer #if (NB_MMU_MODES == 4)
13726fa4cea9Sj_mayer     tlb_flush_entry(&env->tlb_table[3][i], addr);
13736fa4cea9Sj_mayer #endif
13746fa4cea9Sj_mayer #endif
13750124311eSbellard 
1376b362e5e0Spbrook     /* Discard jump cache entries for any tb which might potentially
1377b362e5e0Spbrook        overlap the flushed page.  */
1378b362e5e0Spbrook     i = tb_jmp_cache_hash_page(addr - TARGET_PAGE_SIZE);
1379b362e5e0Spbrook     memset (&env->tb_jmp_cache[i], 0, TB_JMP_PAGE_SIZE * sizeof(tb));
1380b362e5e0Spbrook 
1381b362e5e0Spbrook     i = tb_jmp_cache_hash_page(addr);
1382b362e5e0Spbrook     memset (&env->tb_jmp_cache[i], 0, TB_JMP_PAGE_SIZE * sizeof(tb));
138361382a50Sbellard 
13849fa3e853Sbellard #if !defined(CONFIG_SOFTMMU)
13859fa3e853Sbellard     if (addr < MMAP_AREA_END)
13869fa3e853Sbellard         munmap((void *)addr, TARGET_PAGE_SIZE);
13879fa3e853Sbellard #endif
13880a962c02Sbellard #ifdef USE_KQEMU
13890a962c02Sbellard     if (env->kqemu_enabled) {
13900a962c02Sbellard         kqemu_flush_page(env, addr);
13910a962c02Sbellard     }
13920a962c02Sbellard #endif
13939fa3e853Sbellard }
13949fa3e853Sbellard 
/* update the TLBs so that writes to code in the virtual page 'addr'
   can be detected */
/* Clearing CODE_DIRTY_FLAG for the page forces subsequent writes to it
   to go through the not-dirty slow path, where self-modifying code can
   be caught and the affected TBs invalidated. */
static void tlb_protect_code(ram_addr_t ram_addr)
{
    cpu_physical_memory_reset_dirty(ram_addr,
                                    ram_addr + TARGET_PAGE_SIZE,
                                    CODE_DIRTY_FLAG);
}
14039fa3e853Sbellard 
/* update the TLB so that writes in physical page 'phys_addr' are no longer
   tested for self modifying code */
/* NOTE: 'env' and 'vaddr' are unused here; they are presumably kept for
   interface symmetry with other TLB helpers. Setting CODE_DIRTY_FLAG
   means the page is no longer assumed to contain translated code. */
static void tlb_unprotect_code_phys(CPUState *env, ram_addr_t ram_addr,
                                    target_ulong vaddr)
{
    phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] |= CODE_DIRTY_FLAG;
}
14119fa3e853Sbellard 
14121ccde1cbSbellard static inline void tlb_reset_dirty_range(CPUTLBEntry *tlb_entry,
14131ccde1cbSbellard                                          unsigned long start, unsigned long length)
14141ccde1cbSbellard {
14151ccde1cbSbellard     unsigned long addr;
141684b7b8e7Sbellard     if ((tlb_entry->addr_write & ~TARGET_PAGE_MASK) == IO_MEM_RAM) {
141784b7b8e7Sbellard         addr = (tlb_entry->addr_write & TARGET_PAGE_MASK) + tlb_entry->addend;
14181ccde1cbSbellard         if ((addr - start) < length) {
141984b7b8e7Sbellard             tlb_entry->addr_write = (tlb_entry->addr_write & TARGET_PAGE_MASK) | IO_MEM_NOTDIRTY;
14201ccde1cbSbellard         }
14211ccde1cbSbellard     }
14221ccde1cbSbellard }
14231ccde1cbSbellard 
/* Clear the given dirty flag bits for all RAM pages in [start, end),
   then fix up every CPU's TLB so that the next write to the range goes
   through the slow path and re-marks the pages dirty. In the
   non-SOFTMMU build, writable host mappings covering the range are also
   write-protected so the fault handler can observe the write. */
void cpu_physical_memory_reset_dirty(ram_addr_t start, ram_addr_t end,
                                     int dirty_flags)
{
    CPUState *env;
    unsigned long length, start1;
    int i, mask, len;
    uint8_t *p;

    /* work on whole target pages only */
    start &= TARGET_PAGE_MASK;
    end = TARGET_PAGE_ALIGN(end);

    length = end - start;
    if (length == 0)
        return;
    len = length >> TARGET_PAGE_BITS;
#ifdef USE_KQEMU
    /* XXX: should not depend on cpu context */
    env = first_cpu;
    if (env->kqemu_enabled) {
        ram_addr_t addr;
        addr = start;
        /* tell the kernel accelerator about each now-clean page */
        for(i = 0; i < len; i++) {
            kqemu_set_notdirty(env, addr);
            addr += TARGET_PAGE_SIZE;
        }
    }
#endif
    /* clear only the requested flag bits in the per-page dirty bytes */
    mask = ~dirty_flags;
    p = phys_ram_dirty + (start >> TARGET_PAGE_BITS);
    for(i = 0; i < len; i++)
        p[i] &= mask;

    /* we modify the TLB cache so that the dirty bit will be set again
       when accessing the range */
    /* start1 is the range start expressed as a host address, which is
       what the TLB write entries encode (addr + addend). */
    start1 = start + (unsigned long)phys_ram_base;
    for(env = first_cpu; env != NULL; env = env->next_cpu) {
        for(i = 0; i < CPU_TLB_SIZE; i++)
            tlb_reset_dirty_range(&env->tlb_table[0][i], start1, length);
        for(i = 0; i < CPU_TLB_SIZE; i++)
            tlb_reset_dirty_range(&env->tlb_table[1][i], start1, length);
#if (NB_MMU_MODES >= 3)
        for(i = 0; i < CPU_TLB_SIZE; i++)
            tlb_reset_dirty_range(&env->tlb_table[2][i], start1, length);
#if (NB_MMU_MODES == 4)
        for(i = 0; i < CPU_TLB_SIZE; i++)
            tlb_reset_dirty_range(&env->tlb_table[3][i], start1, length);
#endif
#endif
    }

#if !defined(CONFIG_SOFTMMU)
    /* XXX: this is expensive */
    {
        VirtPageDesc *p;
        int j;
        target_ulong addr;

        /* walk the whole two-level virtual page table and strip write
           permission from host mappings whose physical page lies in
           the flushed range */
        for(i = 0; i < L1_SIZE; i++) {
            p = l1_virt_map[i];
            if (p) {
                addr = i << (TARGET_PAGE_BITS + L2_BITS);
                for(j = 0; j < L2_SIZE; j++) {
                    if (p->valid_tag == virt_valid_tag &&
                        p->phys_addr >= start && p->phys_addr < end &&
                        (p->prot & PROT_WRITE)) {
                        if (addr < MMAP_AREA_END) {
                            mprotect((void *)addr, TARGET_PAGE_SIZE,
                                     p->prot & ~PROT_WRITE);
                        }
                    }
                    addr += TARGET_PAGE_SIZE;
                    p++;
                }
            }
        }
    }
#endif
}
15021ccde1cbSbellard 
15033a7d929eSbellard static inline void tlb_update_dirty(CPUTLBEntry *tlb_entry)
15043a7d929eSbellard {
15053a7d929eSbellard     ram_addr_t ram_addr;
15063a7d929eSbellard 
150784b7b8e7Sbellard     if ((tlb_entry->addr_write & ~TARGET_PAGE_MASK) == IO_MEM_RAM) {
150884b7b8e7Sbellard         ram_addr = (tlb_entry->addr_write & TARGET_PAGE_MASK) +
15093a7d929eSbellard             tlb_entry->addend - (unsigned long)phys_ram_base;
15103a7d929eSbellard         if (!cpu_physical_memory_is_dirty(ram_addr)) {
151184b7b8e7Sbellard             tlb_entry->addr_write |= IO_MEM_NOTDIRTY;
15123a7d929eSbellard         }
15133a7d929eSbellard     }
15143a7d929eSbellard }
15153a7d929eSbellard 
15163a7d929eSbellard /* update the TLB according to the current state of the dirty bits */
15173a7d929eSbellard void cpu_tlb_update_dirty(CPUState *env)
15183a7d929eSbellard {
15193a7d929eSbellard     int i;
15203a7d929eSbellard     for(i = 0; i < CPU_TLB_SIZE; i++)
152184b7b8e7Sbellard         tlb_update_dirty(&env->tlb_table[0][i]);
15223a7d929eSbellard     for(i = 0; i < CPU_TLB_SIZE; i++)
152384b7b8e7Sbellard         tlb_update_dirty(&env->tlb_table[1][i]);
15246fa4cea9Sj_mayer #if (NB_MMU_MODES >= 3)
15256fa4cea9Sj_mayer     for(i = 0; i < CPU_TLB_SIZE; i++)
15266fa4cea9Sj_mayer         tlb_update_dirty(&env->tlb_table[2][i]);
15276fa4cea9Sj_mayer #if (NB_MMU_MODES == 4)
15286fa4cea9Sj_mayer     for(i = 0; i < CPU_TLB_SIZE; i++)
15296fa4cea9Sj_mayer         tlb_update_dirty(&env->tlb_table[3][i]);
15306fa4cea9Sj_mayer #endif
15316fa4cea9Sj_mayer #endif
15323a7d929eSbellard }
15333a7d929eSbellard 
15341ccde1cbSbellard static inline void tlb_set_dirty1(CPUTLBEntry *tlb_entry,
15351ccde1cbSbellard                                   unsigned long start)
15361ccde1cbSbellard {
15371ccde1cbSbellard     unsigned long addr;
153884b7b8e7Sbellard     if ((tlb_entry->addr_write & ~TARGET_PAGE_MASK) == IO_MEM_NOTDIRTY) {
153984b7b8e7Sbellard         addr = (tlb_entry->addr_write & TARGET_PAGE_MASK) + tlb_entry->addend;
15401ccde1cbSbellard         if (addr == start) {
154184b7b8e7Sbellard             tlb_entry->addr_write = (tlb_entry->addr_write & TARGET_PAGE_MASK) | IO_MEM_RAM;
15421ccde1cbSbellard         }
15431ccde1cbSbellard     }
15441ccde1cbSbellard }
15451ccde1cbSbellard 
15461ccde1cbSbellard /* update the TLB corresponding to virtual page vaddr and phys addr
15471ccde1cbSbellard    addr so that it is no longer dirty */
15486a00d601Sbellard static inline void tlb_set_dirty(CPUState *env,
15496a00d601Sbellard                                  unsigned long addr, target_ulong vaddr)
15501ccde1cbSbellard {
15511ccde1cbSbellard     int i;
15521ccde1cbSbellard 
15531ccde1cbSbellard     addr &= TARGET_PAGE_MASK;
15541ccde1cbSbellard     i = (vaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
155584b7b8e7Sbellard     tlb_set_dirty1(&env->tlb_table[0][i], addr);
155684b7b8e7Sbellard     tlb_set_dirty1(&env->tlb_table[1][i], addr);
15576fa4cea9Sj_mayer #if (NB_MMU_MODES >= 3)
15586fa4cea9Sj_mayer     tlb_set_dirty1(&env->tlb_table[2][i], addr);
15596fa4cea9Sj_mayer #if (NB_MMU_MODES == 4)
15606fa4cea9Sj_mayer     tlb_set_dirty1(&env->tlb_table[3][i], addr);
15616fa4cea9Sj_mayer #endif
15626fa4cea9Sj_mayer #endif
15631ccde1cbSbellard }
15641ccde1cbSbellard 
/* add a new TLB entry. At most one entry for a given virtual address
   is permitted. Return 0 if OK or 2 if the page could not be mapped
   (can only happen in non SOFTMMU mode for I/O pages or pages
   conflicting with the host address space). */
/* 'is_user' selects the MMU mode (and thus the TLB table); 'prot' is a
   combination of PAGE_READ/PAGE_WRITE/PAGE_EXEC bits. */
int tlb_set_page_exec(CPUState *env, target_ulong vaddr,
                      target_phys_addr_t paddr, int prot,
                      int is_user, int is_softmmu)
{
    PhysPageDesc *p;
    unsigned long pd;
    unsigned int index;
    target_ulong address;
    target_phys_addr_t addend;
    int ret;
    CPUTLBEntry *te;
    int i;

    /* pd encodes the physical page's memory type (RAM offset or I/O index) */
    p = phys_page_find(paddr >> TARGET_PAGE_BITS);
    if (!p) {
        pd = IO_MEM_UNASSIGNED;
    } else {
        pd = p->phys_offset;
    }
#if defined(DEBUG_TLB)
    printf("tlb_set_page: vaddr=" TARGET_FMT_lx " paddr=0x%08x prot=%x u=%d smmu=%d pd=0x%08lx\n",
           vaddr, (int)paddr, prot, is_user, is_softmmu, pd);
#endif

    ret = 0;
#if !defined(CONFIG_SOFTMMU)
    if (is_softmmu)
#endif
    {
        if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM && !(pd & IO_MEM_ROMD)) {
            /* IO memory case */
            /* the I/O index is kept in the low bits of 'address' so the
               fast path can detect non-RAM accesses */
            address = vaddr | pd;
            addend = paddr;
        } else {
            /* standard memory */
            address = vaddr;
            addend = (unsigned long)phys_ram_base + (pd & TARGET_PAGE_MASK);
        }

        /* Make accesses to pages with watchpoints go via the
           watchpoint trap routines.  */
        for (i = 0; i < env->nb_watchpoints; i++) {
            if (vaddr == (env->watchpoint[i].vaddr & TARGET_PAGE_MASK)) {
                if (address & ~TARGET_PAGE_MASK) {
                    /* page already routed through I/O: remember it was
                       not RAM so the watchpoint handler forwards it */
                    env->watchpoint[i].is_ram = 0;
                    address = vaddr | io_mem_watch;
                } else {
                    env->watchpoint[i].is_ram = 1;
                    /* TODO: Figure out how to make read watchpoints coexist
                       with code.  */
                    pd = (pd & TARGET_PAGE_MASK) | io_mem_watch | IO_MEM_ROMD;
                }
            }
        }

        index = (vaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
        /* the stored addend translates a guest virtual address directly
           to a host pointer: host = vaddr + addend */
        addend -= vaddr;
        te = &env->tlb_table[is_user][index];
        te->addend = addend;
        if (prot & PAGE_READ) {
            te->addr_read = address;
        } else {
            te->addr_read = -1;
        }
        if (prot & PAGE_EXEC) {
            te->addr_code = address;
        } else {
            te->addr_code = -1;
        }
        if (prot & PAGE_WRITE) {
            if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_ROM ||
                (pd & IO_MEM_ROMD)) {
                /* write access calls the I/O callback */
                te->addr_write = vaddr |
                    (pd & ~(TARGET_PAGE_MASK | IO_MEM_ROMD));
            } else if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_RAM &&
                       !cpu_physical_memory_is_dirty(pd)) {
                /* clean RAM page: force the slow path so the dirty bit
                   gets set on the first write */
                te->addr_write = vaddr | IO_MEM_NOTDIRTY;
            } else {
                te->addr_write = address;
            }
        } else {
            te->addr_write = -1;
        }
    }
#if !defined(CONFIG_SOFTMMU)
    else {
        if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM) {
            /* IO access: no mapping is done as it will be handled by the
               soft MMU */
            if (!(env->hflags & HF_SOFTMMU_MASK))
                ret = 2;
        } else {
            void *map_addr;

            if (vaddr >= MMAP_AREA_END) {
                ret = 2;
            } else {
                if (prot & PROT_WRITE) {
                    if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_ROM ||
#if defined(TARGET_HAS_SMC) || 1
                        first_tb ||
#endif
                        ((pd & ~TARGET_PAGE_MASK) == IO_MEM_RAM &&
                         !cpu_physical_memory_is_dirty(pd))) {
                        /* ROM: we do as if code was inside */
                        /* if code is present, we only map as read only and save the
                           original mapping */
                        VirtPageDesc *vp;

                        vp = virt_page_find_alloc(vaddr >> TARGET_PAGE_BITS, 1);
                        vp->phys_addr = pd;
                        vp->prot = prot;
                        vp->valid_tag = virt_valid_tag;
                        prot &= ~PAGE_WRITE;
                    }
                }
                /* map the guest page straight onto the RAM backing file */
                map_addr = mmap((void *)vaddr, TARGET_PAGE_SIZE, prot,
                                MAP_SHARED | MAP_FIXED, phys_ram_fd, (pd & TARGET_PAGE_MASK));
                if (map_addr == MAP_FAILED) {
                    cpu_abort(env, "mmap failed when mapped physical address 0x%08x to virtual address 0x%08x\n",
                              paddr, vaddr);
                }
            }
        }
    }
#endif
    return ret;
}
16989fa3e853Sbellard 
/* called from signal handler: invalidate the code and unprotect the
   page. Return TRUE if the fault was succesfully handled. */
/* Non-SOFTMMU build only; with SOFTMMU this path cannot occur and the
   function simply reports the fault as unhandled. */
int page_unprotect(target_ulong addr, unsigned long pc, void *puc)
{
#if !defined(CONFIG_SOFTMMU)
    VirtPageDesc *vp;

#if defined(DEBUG_TLB)
    printf("page_unprotect: addr=0x%08x\n", addr);
#endif
    addr &= TARGET_PAGE_MASK;

    /* if it is not mapped, no need to worry here */
    if (addr >= MMAP_AREA_END)
        return 0;
    vp = virt_page_find(addr >> TARGET_PAGE_BITS);
    if (!vp)
        return 0;
    /* NOTE: in this case, validate_tag is _not_ tested as it
       validates only the code TLB */
    if (vp->valid_tag != virt_valid_tag)
        return 0;
    if (!(vp->prot & PAGE_WRITE))
        return 0;
#if defined(DEBUG_TLB)
    printf("page_unprotect: addr=0x%08x phys_addr=0x%08x prot=%x\n",
           addr, vp->phys_addr, vp->prot);
#endif
    /* restore the original (writable) protection saved at map time */
    if (mprotect((void *)addr, TARGET_PAGE_SIZE, vp->prot) < 0)
        cpu_abort(cpu_single_env, "error mprotect addr=0x%lx prot=%d\n",
                  (unsigned long)addr, vp->prot);
    /* set the dirty bit */
    phys_ram_dirty[vp->phys_addr >> TARGET_PAGE_BITS] = 0xff;
    /* flush the code inside */
    tb_invalidate_phys_page(vp->phys_addr, pc, puc);
    return 1;
#else
    return 0;
#endif
}
173933417e70Sbellard 
17400124311eSbellard #else
17410124311eSbellard 
/* user-only emulation: there is no softmmu TLB, so flushing is a no-op */
void tlb_flush(CPUState *env, int flush_global)
{
}
17450124311eSbellard 
/* user-only emulation: no TLB to flush, intentionally empty */
void tlb_flush_page(CPUState *env, target_ulong addr)
{
}
17490124311eSbellard 
/* user-only emulation: no TLB entries exist; always report success */
int tlb_set_page_exec(CPUState *env, target_ulong vaddr,
                      target_phys_addr_t paddr, int prot,
                      int is_user, int is_softmmu)
{
    return 0;
}
175633417e70Sbellard 
/* dump memory mappings */
/* Walks the two-level page table and prints maximal runs of pages that
   share the same protection flags, /proc/maps style. The loop runs one
   extra iteration (i == L1_SIZE, p == NULL) so the final open run is
   flushed; 'start == -1' marks "no run in progress". */
void page_dump(FILE *f)
{
    unsigned long start, end;
    int i, j, prot, prot1;
    PageDesc *p;

    fprintf(f, "%-8s %-8s %-8s %s\n",
            "start", "end", "size", "prot");
    start = -1;
    end = -1;
    prot = 0;
    for(i = 0; i <= L1_SIZE; i++) {
        if (i < L1_SIZE)
            p = l1_map[i];
        else
            p = NULL;
        for(j = 0;j < L2_SIZE; j++) {
            /* an absent L2 table reads as prot 0, which terminates any run */
            if (!p)
                prot1 = 0;
            else
                prot1 = p[j].flags;
            if (prot1 != prot) {
                end = (i << (32 - L1_BITS)) | (j << TARGET_PAGE_BITS);
                if (start != -1) {
                    fprintf(f, "%08lx-%08lx %08lx %c%c%c\n",
                            start, end, end - start,
                            prot & PAGE_READ ? 'r' : '-',
                            prot & PAGE_WRITE ? 'w' : '-',
                            prot & PAGE_EXEC ? 'x' : '-');
                }
                /* a non-zero protection starts a new run at 'end' */
                if (prot1 != 0)
                    start = end;
                else
                    start = -1;
                prot = prot1;
            }
            if (!p)
                break;
        }
    }
}
17999fa3e853Sbellard 
180053a5960aSpbrook int page_get_flags(target_ulong address)
18019fa3e853Sbellard {
18029fa3e853Sbellard     PageDesc *p;
18039fa3e853Sbellard 
18049fa3e853Sbellard     p = page_find(address >> TARGET_PAGE_BITS);
18059fa3e853Sbellard     if (!p)
18069fa3e853Sbellard         return 0;
18079fa3e853Sbellard     return p->flags;
18089fa3e853Sbellard }
18099fa3e853Sbellard 
18109fa3e853Sbellard /* modify the flags of a page and invalidate the code if
18119fa3e853Sbellard    necessary. The flag PAGE_WRITE_ORG is positionned automatically
18129fa3e853Sbellard    depending on PAGE_WRITE */
181353a5960aSpbrook void page_set_flags(target_ulong start, target_ulong end, int flags)
18149fa3e853Sbellard {
18159fa3e853Sbellard     PageDesc *p;
181653a5960aSpbrook     target_ulong addr;
18179fa3e853Sbellard 
18189fa3e853Sbellard     start = start & TARGET_PAGE_MASK;
18199fa3e853Sbellard     end = TARGET_PAGE_ALIGN(end);
18209fa3e853Sbellard     if (flags & PAGE_WRITE)
18219fa3e853Sbellard         flags |= PAGE_WRITE_ORG;
18229fa3e853Sbellard     spin_lock(&tb_lock);
18239fa3e853Sbellard     for(addr = start; addr < end; addr += TARGET_PAGE_SIZE) {
18249fa3e853Sbellard         p = page_find_alloc(addr >> TARGET_PAGE_BITS);
18259fa3e853Sbellard         /* if the write protection is set, then we invalidate the code
18269fa3e853Sbellard            inside */
18279fa3e853Sbellard         if (!(p->flags & PAGE_WRITE) &&
18289fa3e853Sbellard             (flags & PAGE_WRITE) &&
18299fa3e853Sbellard             p->first_tb) {
1830d720b93dSbellard             tb_invalidate_phys_page(addr, 0, NULL);
18319fa3e853Sbellard         }
18329fa3e853Sbellard         p->flags = flags;
18339fa3e853Sbellard     }
18349fa3e853Sbellard     spin_unlock(&tb_lock);
18359fa3e853Sbellard }
18369fa3e853Sbellard 
/* called from signal handler: invalidate the code and unprotect the
   page. Return TRUE if the fault was succesfully handled. */
/* User-only variant: works on whole HOST pages, which may span several
   target pages, so the protection decision ORs the flags of every
   target page inside the host page. */
int page_unprotect(target_ulong address, unsigned long pc, void *puc)
{
    unsigned int page_index, prot, pindex;
    PageDesc *p, *p1;
    target_ulong host_start, host_end, addr;

    host_start = address & qemu_host_page_mask;
    page_index = host_start >> TARGET_PAGE_BITS;
    p1 = page_find(page_index);
    if (!p1)
        return 0;
    host_end = host_start + qemu_host_page_size;
    p = p1;
    prot = 0;
    /* accumulate the flags of all target pages in this host page */
    for(addr = host_start;addr < host_end; addr += TARGET_PAGE_SIZE) {
        prot |= p->flags;
        p++;
    }
    /* if the page was really writable, then we change its
       protection back to writable */
    if (prot & PAGE_WRITE_ORG) {
        pindex = (address - host_start) >> TARGET_PAGE_BITS;
        if (!(p1[pindex].flags & PAGE_WRITE)) {
            mprotect((void *)g2h(host_start), qemu_host_page_size,
                     (prot & PAGE_BITS) | PAGE_WRITE);
            p1[pindex].flags |= PAGE_WRITE;
            /* and since the content will be modified, we must invalidate
               the corresponding translated code. */
            tb_invalidate_phys_page(address, pc, puc);
#ifdef DEBUG_TB_CHECK
            tb_invalidate_check(address);
#endif
            return 1;
        }
    }
    return 0;
}
18769fa3e853Sbellard 
18779fa3e853Sbellard /* call this function when system calls directly modify a memory area */
187853a5960aSpbrook /* ??? This should be redundant now we have lock_user.  */
187953a5960aSpbrook void page_unprotect_range(target_ulong data, target_ulong data_size)
18809fa3e853Sbellard {
188153a5960aSpbrook     target_ulong start, end, addr;
18829fa3e853Sbellard 
188353a5960aSpbrook     start = data;
18849fa3e853Sbellard     end = start + data_size;
18859fa3e853Sbellard     start &= TARGET_PAGE_MASK;
18869fa3e853Sbellard     end = TARGET_PAGE_ALIGN(end);
18879fa3e853Sbellard     for(addr = start; addr < end; addr += TARGET_PAGE_SIZE) {
1888d720b93dSbellard         page_unprotect(addr, 0, NULL);
18899fa3e853Sbellard     }
18909fa3e853Sbellard }
18919fa3e853Sbellard 
/* user-only emulation: no TLB dirty tracking needed, intentionally empty */
static inline void tlb_set_dirty(CPUState *env,
                                 unsigned long addr, target_ulong vaddr)
{
}
18969fa3e853Sbellard #endif /* defined(CONFIG_USER_ONLY) */
189733417e70Sbellard 
/* register physical memory. 'size' must be a multiple of the target
   page size. If (phys_offset & ~TARGET_PAGE_MASK) != 0, then it is an
   io memory page */
/* NOTE: the loop condition uses '!=' rather than '<' — presumably so a
   region ending exactly at the top of the physical address space still
   terminates; confirm before changing. */
void cpu_register_physical_memory(target_phys_addr_t start_addr,
                                  unsigned long size,
                                  unsigned long phys_offset)
{
    target_phys_addr_t addr, end_addr;
    PhysPageDesc *p;
    CPUState *env;

    /* round size up to a whole number of target pages */
    size = (size + TARGET_PAGE_SIZE - 1) & TARGET_PAGE_MASK;
    end_addr = start_addr + size;
    for(addr = start_addr; addr != end_addr; addr += TARGET_PAGE_SIZE) {
        p = phys_page_find_alloc(addr >> TARGET_PAGE_BITS, 1);
        p->phys_offset = phys_offset;
        /* only RAM and ROMD regions advance through backing storage;
           plain I/O pages all share the same phys_offset */
        if ((phys_offset & ~TARGET_PAGE_MASK) <= IO_MEM_ROM ||
            (phys_offset & IO_MEM_ROMD))
            phys_offset += TARGET_PAGE_SIZE;
    }

    /* since each CPU stores ram addresses in its TLB cache, we must
       reset the modified entries */
    /* XXX: slow ! */
    for(env = first_cpu; env != NULL; env = env->next_cpu) {
        tlb_flush(env, 1);
    }
}
192633417e70Sbellard 
1927ba863458Sbellard /* XXX: temporary until new memory mapping API */
1928ba863458Sbellard uint32_t cpu_get_physical_page_desc(target_phys_addr_t addr)
1929ba863458Sbellard {
1930ba863458Sbellard     PhysPageDesc *p;
1931ba863458Sbellard 
1932ba863458Sbellard     p = phys_page_find(addr >> TARGET_PAGE_BITS);
1933ba863458Sbellard     if (!p)
1934ba863458Sbellard         return IO_MEM_UNASSIGNED;
1935ba863458Sbellard     return p->phys_offset;
1936ba863458Sbellard }
1937ba863458Sbellard 
/* XXX: better than nothing */
/* Bump-pointer allocator over the preallocated guest RAM area: returns
   the ram_addr_t offset of a fresh region of at least 'size' bytes.
   There is no reclamation (see qemu_ram_free).  Aborts when the static
   phys_ram_size budget would be exceeded.
   NOTE(review): the '>=' comparison also rejects an allocation that
   exactly fills RAM -- presumably intentional slack; confirm.  */
ram_addr_t qemu_ram_alloc(unsigned int size)
{
    ram_addr_t addr;
    if ((phys_ram_alloc_offset + size) >= phys_ram_size) {
        fprintf(stderr, "Not enough memory (requested_size = %u, max memory = %d)\n",
                size, phys_ram_size);
        abort();
    }
    addr = phys_ram_alloc_offset;
    /* keep the allocation cursor page-aligned for the next caller */
    phys_ram_alloc_offset = TARGET_PAGE_ALIGN(phys_ram_alloc_offset + size);
    return addr;
}
1951e9a1ab19Sbellard 
/* Deliberately a no-op: RAM is handed out by the linear allocator in
   qemu_ram_alloc and is never reclaimed. */
void qemu_ram_free(ram_addr_t addr)
{
}
1955e9a1ab19Sbellard 
1956a4193c8aSbellard static uint32_t unassigned_mem_readb(void *opaque, target_phys_addr_t addr)
195733417e70Sbellard {
195867d3b957Spbrook #ifdef DEBUG_UNASSIGNED
195967d3b957Spbrook     printf("Unassigned mem read  0x%08x\n", (int)addr);
196067d3b957Spbrook #endif
196133417e70Sbellard     return 0;
196233417e70Sbellard }
196333417e70Sbellard 
1964a4193c8aSbellard static void unassigned_mem_writeb(void *opaque, target_phys_addr_t addr, uint32_t val)
196533417e70Sbellard {
196667d3b957Spbrook #ifdef DEBUG_UNASSIGNED
196767d3b957Spbrook     printf("Unassigned mem write 0x%08x = 0x%x\n", (int)addr, val);
196867d3b957Spbrook #endif
196933417e70Sbellard }
197033417e70Sbellard 
/* Dispatch tables for unassigned memory: byte, word and long accesses
   all share the byte routine (reads return 0, writes are discarded). */
static CPUReadMemoryFunc *unassigned_mem_read[3] = {
    unassigned_mem_readb,
    unassigned_mem_readb,
    unassigned_mem_readb,
};

static CPUWriteMemoryFunc *unassigned_mem_write[3] = {
    unassigned_mem_writeb,
    unassigned_mem_writeb,
    unassigned_mem_writeb,
};
198233417e70Sbellard 
/* Byte store to a RAM page whose dirty bits are not all set: keeps any
   translated code on the page consistent, performs the store, then
   updates the dirty bitmap.  'addr' is already a host virtual address
   inside phys_ram_base (arranged by the softmmu TLB). */
static void notdirty_mem_writeb(void *opaque, target_phys_addr_t addr, uint32_t val)
{
    unsigned long ram_addr;
    int dirty_flags;
    ram_addr = addr - (unsigned long)phys_ram_base;
    dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
    if (!(dirty_flags & CODE_DIRTY_FLAG)) {
#if !defined(CONFIG_USER_ONLY)
        /* the page may hold translated code: invalidate before writing,
           then re-read the flags the invalidation may have changed */
        tb_invalidate_phys_page_fast(ram_addr, 1);
        dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
#endif
    }
    stb_p((uint8_t *)(long)addr, val);
#ifdef USE_KQEMU
    if (cpu_single_env->kqemu_enabled &&
        (dirty_flags & KQEMU_MODIFY_PAGE_MASK) != KQEMU_MODIFY_PAGE_MASK)
        kqemu_modify_page(cpu_single_env, ram_addr);
#endif
    /* set all dirty bits except the code-tracking one */
    dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
    phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] = dirty_flags;
    /* we remove the notdirty callback only if the code has been
       flushed */
    if (dirty_flags == 0xff)
        tlb_set_dirty(cpu_single_env, addr, cpu_single_env->mem_write_vaddr);
}
20081ccde1cbSbellard 
/* 16-bit store to a not-fully-dirty RAM page: invalidate overlapping
   translated code if needed, store, then update the dirty bitmap.
   'addr' is a host virtual address inside phys_ram_base. */
static void notdirty_mem_writew(void *opaque, target_phys_addr_t addr, uint32_t val)
{
    unsigned long ram_addr;
    int dirty_flags;
    ram_addr = addr - (unsigned long)phys_ram_base;
    dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
    if (!(dirty_flags & CODE_DIRTY_FLAG)) {
#if !defined(CONFIG_USER_ONLY)
        /* invalidate translated code covering these 2 bytes, then
           re-read the possibly-updated flags */
        tb_invalidate_phys_page_fast(ram_addr, 2);
        dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
#endif
    }
    stw_p((uint8_t *)(long)addr, val);
#ifdef USE_KQEMU
    if (cpu_single_env->kqemu_enabled &&
        (dirty_flags & KQEMU_MODIFY_PAGE_MASK) != KQEMU_MODIFY_PAGE_MASK)
        kqemu_modify_page(cpu_single_env, ram_addr);
#endif
    /* set all dirty bits except the code-tracking one */
    dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
    phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] = dirty_flags;
    /* we remove the notdirty callback only if the code has been
       flushed */
    if (dirty_flags == 0xff)
        tlb_set_dirty(cpu_single_env, addr, cpu_single_env->mem_write_vaddr);
}
20341ccde1cbSbellard 
/* 32-bit store to a not-fully-dirty RAM page: invalidate overlapping
   translated code if needed, store, then update the dirty bitmap.
   'addr' is a host virtual address inside phys_ram_base. */
static void notdirty_mem_writel(void *opaque, target_phys_addr_t addr, uint32_t val)
{
    unsigned long ram_addr;
    int dirty_flags;
    ram_addr = addr - (unsigned long)phys_ram_base;
    dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
    if (!(dirty_flags & CODE_DIRTY_FLAG)) {
#if !defined(CONFIG_USER_ONLY)
        /* invalidate translated code covering these 4 bytes, then
           re-read the possibly-updated flags */
        tb_invalidate_phys_page_fast(ram_addr, 4);
        dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
#endif
    }
    stl_p((uint8_t *)(long)addr, val);
#ifdef USE_KQEMU
    if (cpu_single_env->kqemu_enabled &&
        (dirty_flags & KQEMU_MODIFY_PAGE_MASK) != KQEMU_MODIFY_PAGE_MASK)
        kqemu_modify_page(cpu_single_env, ram_addr);
#endif
    /* set all dirty bits except the code-tracking one */
    dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
    phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] = dirty_flags;
    /* we remove the notdirty callback only if the code has been
       flushed */
    if (dirty_flags == 0xff)
        tlb_set_dirty(cpu_single_env, addr, cpu_single_env->mem_write_vaddr);
}
20601ccde1cbSbellard 
/* Placeholder read table for slots whose reads never go through the IO
   dispatch path (the entries are intentionally NULL). */
static CPUReadMemoryFunc *error_mem_read[3] = {
    NULL, /* never used */
    NULL, /* never used */
    NULL, /* never used */
};

/* Write table routing byte/word/long stores through the dirty-tracking
   handlers above. */
static CPUWriteMemoryFunc *notdirty_mem_write[3] = {
    notdirty_mem_writeb,
    notdirty_mem_writew,
    notdirty_mem_writel,
};
20721ccde1cbSbellard 
#if defined(CONFIG_SOFTMMU)
/* Watchpoint access routines.  Watchpoints are inserted using TLB tricks,
   so these check for a hit then pass through to the normal out-of-line
   phys routines.  */
/* Note: the read handlers forward directly without calling
   check_watchpoint; only writes trigger the watchpoint check here. */
static uint32_t watch_mem_readb(void *opaque, target_phys_addr_t addr)
{
    return ldub_phys(addr);
}

static uint32_t watch_mem_readw(void *opaque, target_phys_addr_t addr)
{
    return lduw_phys(addr);
}

static uint32_t watch_mem_readl(void *opaque, target_phys_addr_t addr)
{
    return ldl_phys(addr);
}
20916658ffb8Spbrook 
20926658ffb8Spbrook /* Generate a debug exception if a watchpoint has been hit.
20936658ffb8Spbrook    Returns the real physical address of the access.  addr will be a host
20946658ffb8Spbrook    address in the is_ram case.  */
20956658ffb8Spbrook static target_ulong check_watchpoint(target_phys_addr_t addr)
20966658ffb8Spbrook {
20976658ffb8Spbrook     CPUState *env = cpu_single_env;
20986658ffb8Spbrook     target_ulong watch;
20996658ffb8Spbrook     target_ulong retaddr;
21006658ffb8Spbrook     int i;
21016658ffb8Spbrook 
21026658ffb8Spbrook     retaddr = addr;
21036658ffb8Spbrook     for (i = 0; i < env->nb_watchpoints; i++) {
21046658ffb8Spbrook         watch = env->watchpoint[i].vaddr;
21056658ffb8Spbrook         if (((env->mem_write_vaddr ^ watch) & TARGET_PAGE_MASK) == 0) {
21066658ffb8Spbrook             if (env->watchpoint[i].is_ram)
21076658ffb8Spbrook                 retaddr = addr - (unsigned long)phys_ram_base;
21086658ffb8Spbrook             if (((addr ^ watch) & ~TARGET_PAGE_MASK) == 0) {
21096658ffb8Spbrook                 cpu_single_env->watchpoint_hit = i + 1;
21106658ffb8Spbrook                 cpu_interrupt(cpu_single_env, CPU_INTERRUPT_DEBUG);
21116658ffb8Spbrook                 break;
21126658ffb8Spbrook             }
21136658ffb8Spbrook         }
21146658ffb8Spbrook     }
21156658ffb8Spbrook     return retaddr;
21166658ffb8Spbrook }
21176658ffb8Spbrook 
/* Store handlers for pages containing a watchpoint: run the watchpoint
   check (which may raise a debug interrupt and translate the address),
   then forward to the normal physical store routine. */
static void watch_mem_writeb(void *opaque, target_phys_addr_t addr,
                             uint32_t val)
{
    addr = check_watchpoint(addr);
    stb_phys(addr, val);
}

static void watch_mem_writew(void *opaque, target_phys_addr_t addr,
                             uint32_t val)
{
    addr = check_watchpoint(addr);
    stw_phys(addr, val);
}

static void watch_mem_writel(void *opaque, target_phys_addr_t addr,
                             uint32_t val)
{
    addr = check_watchpoint(addr);
    stl_phys(addr, val);
}

/* Size-indexed dispatch tables for the watchpoint IO slot. */
static CPUReadMemoryFunc *watch_mem_read[3] = {
    watch_mem_readb,
    watch_mem_readw,
    watch_mem_readl,
};

static CPUWriteMemoryFunc *watch_mem_write[3] = {
    watch_mem_writeb,
    watch_mem_writew,
    watch_mem_writel,
};
#endif
21516658ffb8Spbrook 
/* Register the built-in IO memory slots (ROM, unassigned, notdirty and,
   under softmmu, the watchpoint slot) and allocate the per-page dirty
   bitmap. */
static void io_mem_init(void)
{
    cpu_register_io_memory(IO_MEM_ROM >> IO_MEM_SHIFT, error_mem_read, unassigned_mem_write, NULL);
    cpu_register_io_memory(IO_MEM_UNASSIGNED >> IO_MEM_SHIFT, unassigned_mem_read, unassigned_mem_write, NULL);
    cpu_register_io_memory(IO_MEM_NOTDIRTY >> IO_MEM_SHIFT, error_mem_read, notdirty_mem_write, NULL);
    /* first index available for dynamic registration; indices below are
       reserved for the fixed IO_MEM_* slots -- TODO confirm against the
       IO_MEM_* constant definitions */
    io_mem_nb = 5;

#if defined(CONFIG_SOFTMMU)
    /* io_index -1 (<= 0) requests a dynamically allocated slot */
    io_mem_watch = cpu_register_io_memory(-1, watch_mem_read,
                                          watch_mem_write, NULL);
#endif
    /* alloc dirty bits array */
    phys_ram_dirty = qemu_vmalloc(phys_ram_size >> TARGET_PAGE_BITS);
    /* all pages start with every dirty bit set */
    memset(phys_ram_dirty, 0xff, phys_ram_size >> TARGET_PAGE_BITS);
}
216733417e70Sbellard 
216833417e70Sbellard /* mem_read and mem_write are arrays of functions containing the
216933417e70Sbellard    function to access byte (index 0), word (index 1) and dword (index
217033417e70Sbellard    2). All functions must be supplied. If io_index is non zero, the
217133417e70Sbellard    corresponding io zone is modified. If it is zero, a new io zone is
217233417e70Sbellard    allocated. The return value can be used with
217333417e70Sbellard    cpu_register_physical_memory(). (-1) is returned if error. */
217433417e70Sbellard int cpu_register_io_memory(int io_index,
217533417e70Sbellard                            CPUReadMemoryFunc **mem_read,
2176a4193c8aSbellard                            CPUWriteMemoryFunc **mem_write,
2177a4193c8aSbellard                            void *opaque)
217833417e70Sbellard {
217933417e70Sbellard     int i;
218033417e70Sbellard 
218133417e70Sbellard     if (io_index <= 0) {
2182b5ff1b31Sbellard         if (io_mem_nb >= IO_MEM_NB_ENTRIES)
218333417e70Sbellard             return -1;
218433417e70Sbellard         io_index = io_mem_nb++;
218533417e70Sbellard     } else {
218633417e70Sbellard         if (io_index >= IO_MEM_NB_ENTRIES)
218733417e70Sbellard             return -1;
218833417e70Sbellard     }
218933417e70Sbellard 
219033417e70Sbellard     for(i = 0;i < 3; i++) {
219133417e70Sbellard         io_mem_read[io_index][i] = mem_read[i];
219233417e70Sbellard         io_mem_write[io_index][i] = mem_write[i];
219333417e70Sbellard     }
2194a4193c8aSbellard     io_mem_opaque[io_index] = opaque;
219533417e70Sbellard     return io_index << IO_MEM_SHIFT;
219633417e70Sbellard }
219761382a50Sbellard 
/* Return the write-handler table of a registered IO slot; the argument
   is an encoded value as returned by cpu_register_io_memory. */
CPUWriteMemoryFunc **cpu_get_io_memory_write(int io_index)
{
    return io_mem_write[io_index >> IO_MEM_SHIFT];
}

/* Same lookup for the read-handler table. */
CPUReadMemoryFunc **cpu_get_io_memory_read(int io_index)
{
    return io_mem_read[io_index >> IO_MEM_SHIFT];
}
22078926b517Sbellard 
220813eb76e0Sbellard /* physical memory access (slow version, mainly for debug) */
220913eb76e0Sbellard #if defined(CONFIG_USER_ONLY)
22102e12669aSbellard void cpu_physical_memory_rw(target_phys_addr_t addr, uint8_t *buf,
221113eb76e0Sbellard                             int len, int is_write)
221213eb76e0Sbellard {
221313eb76e0Sbellard     int l, flags;
221413eb76e0Sbellard     target_ulong page;
221553a5960aSpbrook     void * p;
221613eb76e0Sbellard 
221713eb76e0Sbellard     while (len > 0) {
221813eb76e0Sbellard         page = addr & TARGET_PAGE_MASK;
221913eb76e0Sbellard         l = (page + TARGET_PAGE_SIZE) - addr;
222013eb76e0Sbellard         if (l > len)
222113eb76e0Sbellard             l = len;
222213eb76e0Sbellard         flags = page_get_flags(page);
222313eb76e0Sbellard         if (!(flags & PAGE_VALID))
222413eb76e0Sbellard             return;
222513eb76e0Sbellard         if (is_write) {
222613eb76e0Sbellard             if (!(flags & PAGE_WRITE))
222713eb76e0Sbellard                 return;
222853a5960aSpbrook             p = lock_user(addr, len, 0);
222953a5960aSpbrook             memcpy(p, buf, len);
223053a5960aSpbrook             unlock_user(p, addr, len);
223113eb76e0Sbellard         } else {
223213eb76e0Sbellard             if (!(flags & PAGE_READ))
223313eb76e0Sbellard                 return;
223453a5960aSpbrook             p = lock_user(addr, len, 1);
223553a5960aSpbrook             memcpy(buf, p, len);
223653a5960aSpbrook             unlock_user(p, addr, 0);
223713eb76e0Sbellard         }
223813eb76e0Sbellard         len -= l;
223913eb76e0Sbellard         buf += l;
224013eb76e0Sbellard         addr += l;
224113eb76e0Sbellard     }
224213eb76e0Sbellard }
22438df1cd07Sbellard 
224413eb76e0Sbellard #else
/* Softmmu variant: copy between guest physical memory and 'buf'.
   Each page is classified via the physical page table: RAM is copied
   directly through phys_ram_base (with translated-code invalidation and
   dirty-bit updates on writes); everything else is dispatched to the
   registered IO handlers using the widest naturally-aligned access
   (4, 2 or 1 bytes). */
void cpu_physical_memory_rw(target_phys_addr_t addr, uint8_t *buf,
                            int len, int is_write)
{
    int l, io_index;
    uint8_t *ptr;
    uint32_t val;
    target_phys_addr_t page;
    unsigned long pd;
    PhysPageDesc *p;

    while (len > 0) {
        page = addr & TARGET_PAGE_MASK;
        /* l = bytes remaining on this target page */
        l = (page + TARGET_PAGE_SIZE) - addr;
        if (l > len)
            l = len;
        p = phys_page_find(page >> TARGET_PAGE_BITS);
        if (!p) {
            pd = IO_MEM_UNASSIGNED;
        } else {
            pd = p->phys_offset;
        }

        if (is_write) {
            if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
                io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
                /* XXX: could force cpu_single_env to NULL to avoid
                   potential bugs */
                if (l >= 4 && ((addr & 3) == 0)) {
                    /* 32 bit write access */
                    val = ldl_p(buf);
                    io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
                    l = 4;
                } else if (l >= 2 && ((addr & 1) == 0)) {
                    /* 16 bit write access */
                    val = lduw_p(buf);
                    io_mem_write[io_index][1](io_mem_opaque[io_index], addr, val);
                    l = 2;
                } else {
                    /* 8 bit write access */
                    val = ldub_p(buf);
                    io_mem_write[io_index][0](io_mem_opaque[io_index], addr, val);
                    l = 1;
                }
            } else {
                unsigned long addr1;
                addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
                /* RAM case */
                ptr = phys_ram_base + addr1;
                memcpy(ptr, buf, l);
                if (!cpu_physical_memory_is_dirty(addr1)) {
                    /* invalidate code */
                    tb_invalidate_phys_page_range(addr1, addr1 + l, 0);
                    /* set dirty bit */
                    phys_ram_dirty[addr1 >> TARGET_PAGE_BITS] |=
                        (0xff & ~CODE_DIRTY_FLAG);
                }
            }
        } else {
            if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
                !(pd & IO_MEM_ROMD)) {
                /* I/O case */
                io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
                if (l >= 4 && ((addr & 3) == 0)) {
                    /* 32 bit read access */
                    val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr);
                    stl_p(buf, val);
                    l = 4;
                } else if (l >= 2 && ((addr & 1) == 0)) {
                    /* 16 bit read access */
                    val = io_mem_read[io_index][1](io_mem_opaque[io_index], addr);
                    stw_p(buf, val);
                    l = 2;
                } else {
                    /* 8 bit read access */
                    val = io_mem_read[io_index][0](io_mem_opaque[io_index], addr);
                    stb_p(buf, val);
                    l = 1;
                }
            } else {
                /* RAM case (includes ROM and ROMD: reads come straight
                   from backing memory) */
                ptr = phys_ram_base + (pd & TARGET_PAGE_MASK) +
                    (addr & ~TARGET_PAGE_MASK);
                memcpy(buf, ptr, l);
            }
        }
        len -= l;
        buf += l;
        addr += l;
    }
}
23358df1cd07Sbellard 
/* used for ROM loading : can write in RAM and ROM */
/* Copies 'len' bytes from 'buf' into guest physical memory, page by
   page, writing directly into the backing storage of RAM, ROM and ROMD
   pages; pages with no backing storage are silently skipped.  No dirty
   tracking or code invalidation is performed. */
void cpu_physical_memory_write_rom(target_phys_addr_t addr,
                                   const uint8_t *buf, int len)
{
    int l;
    uint8_t *ptr;
    target_phys_addr_t page;
    unsigned long pd;
    PhysPageDesc *p;

    while (len > 0) {
        page = addr & TARGET_PAGE_MASK;
        /* do not cross a target page boundary in one copy */
        l = (page + TARGET_PAGE_SIZE) - addr;
        if (l > len)
            l = len;
        p = phys_page_find(page >> TARGET_PAGE_BITS);
        if (!p) {
            pd = IO_MEM_UNASSIGNED;
        } else {
            pd = p->phys_offset;
        }

        if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM &&
            (pd & ~TARGET_PAGE_MASK) != IO_MEM_ROM &&
            !(pd & IO_MEM_ROMD)) {
            /* do nothing */
        } else {
            unsigned long addr1;
            addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
            /* ROM/RAM case */
            ptr = phys_ram_base + addr1;
            memcpy(ptr, buf, l);
        }
        len -= l;
        buf += l;
        addr += l;
    }
}
2374d0ecd2aaSbellard 
2375d0ecd2aaSbellard 
/* warning: addr must be aligned */
/* 32-bit load from guest physical memory: RAM/ROM/ROMD pages are read
   directly from backing memory, anything else goes through the slot's
   registered 32-bit IO read handler. */
uint32_t ldl_phys(target_phys_addr_t addr)
{
    int io_index;
    uint8_t *ptr;
    uint32_t val;
    unsigned long pd;
    PhysPageDesc *p;

    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    if (!p) {
        pd = IO_MEM_UNASSIGNED;
    } else {
        pd = p->phys_offset;
    }

    if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
        !(pd & IO_MEM_ROMD)) {
        /* I/O case */
        io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
        val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr);
    } else {
        /* RAM case */
        ptr = phys_ram_base + (pd & TARGET_PAGE_MASK) +
            (addr & ~TARGET_PAGE_MASK);
        val = ldl_p(ptr);
    }
    return val;
}
24058df1cd07Sbellard 
/* warning: addr must be aligned */
/* 64-bit load from guest physical memory.  For IO pages the access is
   split into two 32-bit reads assembled in target byte order; RAM pages
   are read directly. */
uint64_t ldq_phys(target_phys_addr_t addr)
{
    int io_index;
    uint8_t *ptr;
    uint64_t val;
    unsigned long pd;
    PhysPageDesc *p;

    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    if (!p) {
        pd = IO_MEM_UNASSIGNED;
    } else {
        pd = p->phys_offset;
    }

    if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
        !(pd & IO_MEM_ROMD)) {
        /* I/O case: two 32-bit reads, high word first on big-endian
           targets */
        io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
#ifdef TARGET_WORDS_BIGENDIAN
        val = (uint64_t)io_mem_read[io_index][2](io_mem_opaque[io_index], addr) << 32;
        val |= io_mem_read[io_index][2](io_mem_opaque[io_index], addr + 4);
#else
        val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr);
        val |= (uint64_t)io_mem_read[io_index][2](io_mem_opaque[io_index], addr + 4) << 32;
#endif
    } else {
        /* RAM case */
        ptr = phys_ram_base + (pd & TARGET_PAGE_MASK) +
            (addr & ~TARGET_PAGE_MASK);
        val = ldq_p(ptr);
    }
    return val;
}
244184b7b8e7Sbellard 
2442aab33094Sbellard /* XXX: optimize */
2443aab33094Sbellard uint32_t ldub_phys(target_phys_addr_t addr)
2444aab33094Sbellard {
2445aab33094Sbellard     uint8_t val;
2446aab33094Sbellard     cpu_physical_memory_read(addr, &val, 1);
2447aab33094Sbellard     return val;
2448aab33094Sbellard }
2449aab33094Sbellard 
2450aab33094Sbellard /* XXX: optimize */
2451aab33094Sbellard uint32_t lduw_phys(target_phys_addr_t addr)
2452aab33094Sbellard {
2453aab33094Sbellard     uint16_t val;
2454aab33094Sbellard     cpu_physical_memory_read(addr, (uint8_t *)&val, 2);
2455aab33094Sbellard     return tswap16(val);
2456aab33094Sbellard }
2457aab33094Sbellard 
/* warning: addr must be aligned. The ram page is not masked as dirty
   and the code inside is not invalidated. It is useful if the dirty
   bits are used to track modified PTEs */
void stl_phys_notdirty(target_phys_addr_t addr, uint32_t val)
{
    int io_index;
    uint8_t *ptr;
    unsigned long pd;
    PhysPageDesc *p;

    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    if (!p) {
        pd = IO_MEM_UNASSIGNED;
    } else {
        pd = p->phys_offset;
    }

    if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
        /* non-RAM: dispatch to the slot's 32-bit IO write handler */
        io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
        io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
    } else {
        /* RAM: store directly, deliberately skipping dirty tracking */
        ptr = phys_ram_base + (pd & TARGET_PAGE_MASK) +
            (addr & ~TARGET_PAGE_MASK);
        stl_p(ptr, val);
    }
}
24848df1cd07Sbellard 
/* 64-bit variant of the no-dirty-tracking store: addr must be aligned;
   IO pages get two 32-bit writes in target byte order, RAM pages are
   written directly without dirty-bit updates or code invalidation. */
void stq_phys_notdirty(target_phys_addr_t addr, uint64_t val)
{
    int io_index;
    uint8_t *ptr;
    unsigned long pd;
    PhysPageDesc *p;

    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    if (!p) {
        pd = IO_MEM_UNASSIGNED;
    } else {
        pd = p->phys_offset;
    }

    if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
        io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
        /* split into two 32-bit IO writes, high word first on
           big-endian targets */
#ifdef TARGET_WORDS_BIGENDIAN
        io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val >> 32);
        io_mem_write[io_index][2](io_mem_opaque[io_index], addr + 4, val);
#else
        io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
        io_mem_write[io_index][2](io_mem_opaque[io_index], addr + 4, val >> 32);
#endif
    } else {
        ptr = phys_ram_base + (pd & TARGET_PAGE_MASK) +
            (addr & ~TARGET_PAGE_MASK);
        stq_p(ptr, val);
    }
}
2514bc98a7efSj_mayer 
/* warning: addr must be aligned */
/* 32-bit store to guest physical memory: IO pages are dispatched to the
   registered handler; RAM pages are written directly, followed by
   translated-code invalidation and dirty-bit updates when the page was
   clean. */
void stl_phys(target_phys_addr_t addr, uint32_t val)
{
    int io_index;
    uint8_t *ptr;
    unsigned long pd;
    PhysPageDesc *p;

    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    if (!p) {
        pd = IO_MEM_UNASSIGNED;
    } else {
        pd = p->phys_offset;
    }

    if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
        io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
        io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
    } else {
        unsigned long addr1;
        addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
        /* RAM case */
        ptr = phys_ram_base + addr1;
        stl_p(ptr, val);
        if (!cpu_physical_memory_is_dirty(addr1)) {
            /* invalidate code */
            tb_invalidate_phys_page_range(addr1, addr1 + 4, 0);
            /* set dirty bit */
            phys_ram_dirty[addr1 >> TARGET_PAGE_BITS] |=
                (0xff & ~CODE_DIRTY_FLAG);
        }
    }
}
25488df1cd07Sbellard 
2549aab33094Sbellard /* XXX: optimize */
2550aab33094Sbellard void stb_phys(target_phys_addr_t addr, uint32_t val)
2551aab33094Sbellard {
2552aab33094Sbellard     uint8_t v = val;
2553aab33094Sbellard     cpu_physical_memory_write(addr, &v, 1);
2554aab33094Sbellard }
2555aab33094Sbellard 
2556aab33094Sbellard /* XXX: optimize */
2557aab33094Sbellard void stw_phys(target_phys_addr_t addr, uint32_t val)
2558aab33094Sbellard {
2559aab33094Sbellard     uint16_t v = tswap16(val);
2560aab33094Sbellard     cpu_physical_memory_write(addr, (const uint8_t *)&v, 2);
2561aab33094Sbellard }
2562aab33094Sbellard 
2563aab33094Sbellard /* XXX: optimize */
2564aab33094Sbellard void stq_phys(target_phys_addr_t addr, uint64_t val)
2565aab33094Sbellard {
2566aab33094Sbellard     val = tswap64(val);
2567aab33094Sbellard     cpu_physical_memory_write(addr, (const uint8_t *)&val, 8);
2568aab33094Sbellard }
2569aab33094Sbellard 
257013eb76e0Sbellard #endif
257113eb76e0Sbellard 
257213eb76e0Sbellard /* virtual memory access for debug */
2573b448f2f3Sbellard int cpu_memory_rw_debug(CPUState *env, target_ulong addr,
2574b448f2f3Sbellard                         uint8_t *buf, int len, int is_write)
257513eb76e0Sbellard {
257613eb76e0Sbellard     int l;
257713eb76e0Sbellard     target_ulong page, phys_addr;
257813eb76e0Sbellard 
257913eb76e0Sbellard     while (len > 0) {
258013eb76e0Sbellard         page = addr & TARGET_PAGE_MASK;
258113eb76e0Sbellard         phys_addr = cpu_get_phys_page_debug(env, page);
258213eb76e0Sbellard         /* if no physical page mapped, return an error */
258313eb76e0Sbellard         if (phys_addr == -1)
258413eb76e0Sbellard             return -1;
258513eb76e0Sbellard         l = (page + TARGET_PAGE_SIZE) - addr;
258613eb76e0Sbellard         if (l > len)
258713eb76e0Sbellard             l = len;
2588b448f2f3Sbellard         cpu_physical_memory_rw(phys_addr + (addr & ~TARGET_PAGE_MASK),
2589b448f2f3Sbellard                                buf, l, is_write);
259013eb76e0Sbellard         len -= l;
259113eb76e0Sbellard         buf += l;
259213eb76e0Sbellard         addr += l;
259313eb76e0Sbellard     }
259413eb76e0Sbellard     return 0;
259513eb76e0Sbellard }
259613eb76e0Sbellard 
2597e3db7226Sbellard void dump_exec_info(FILE *f,
2598e3db7226Sbellard                     int (*cpu_fprintf)(FILE *f, const char *fmt, ...))
2599e3db7226Sbellard {
2600e3db7226Sbellard     int i, target_code_size, max_target_code_size;
2601e3db7226Sbellard     int direct_jmp_count, direct_jmp2_count, cross_page;
2602e3db7226Sbellard     TranslationBlock *tb;
2603e3db7226Sbellard 
2604e3db7226Sbellard     target_code_size = 0;
2605e3db7226Sbellard     max_target_code_size = 0;
2606e3db7226Sbellard     cross_page = 0;
2607e3db7226Sbellard     direct_jmp_count = 0;
2608e3db7226Sbellard     direct_jmp2_count = 0;
2609e3db7226Sbellard     for(i = 0; i < nb_tbs; i++) {
2610e3db7226Sbellard         tb = &tbs[i];
2611e3db7226Sbellard         target_code_size += tb->size;
2612e3db7226Sbellard         if (tb->size > max_target_code_size)
2613e3db7226Sbellard             max_target_code_size = tb->size;
2614e3db7226Sbellard         if (tb->page_addr[1] != -1)
2615e3db7226Sbellard             cross_page++;
2616e3db7226Sbellard         if (tb->tb_next_offset[0] != 0xffff) {
2617e3db7226Sbellard             direct_jmp_count++;
2618e3db7226Sbellard             if (tb->tb_next_offset[1] != 0xffff) {
2619e3db7226Sbellard                 direct_jmp2_count++;
2620e3db7226Sbellard             }
2621e3db7226Sbellard         }
2622e3db7226Sbellard     }
2623e3db7226Sbellard     /* XXX: avoid using doubles ? */
2624e3db7226Sbellard     cpu_fprintf(f, "TB count            %d\n", nb_tbs);
2625e3db7226Sbellard     cpu_fprintf(f, "TB avg target size  %d max=%d bytes\n",
2626e3db7226Sbellard                 nb_tbs ? target_code_size / nb_tbs : 0,
2627e3db7226Sbellard                 max_target_code_size);
2628e3db7226Sbellard     cpu_fprintf(f, "TB avg host size    %d bytes (expansion ratio: %0.1f)\n",
2629e3db7226Sbellard                 nb_tbs ? (code_gen_ptr - code_gen_buffer) / nb_tbs : 0,
2630e3db7226Sbellard                 target_code_size ? (double) (code_gen_ptr - code_gen_buffer) / target_code_size : 0);
2631e3db7226Sbellard     cpu_fprintf(f, "cross page TB count %d (%d%%)\n",
2632e3db7226Sbellard             cross_page,
2633e3db7226Sbellard             nb_tbs ? (cross_page * 100) / nb_tbs : 0);
2634e3db7226Sbellard     cpu_fprintf(f, "direct jump count   %d (%d%%) (2 jumps=%d %d%%)\n",
2635e3db7226Sbellard                 direct_jmp_count,
2636e3db7226Sbellard                 nb_tbs ? (direct_jmp_count * 100) / nb_tbs : 0,
2637e3db7226Sbellard                 direct_jmp2_count,
2638e3db7226Sbellard                 nb_tbs ? (direct_jmp2_count * 100) / nb_tbs : 0);
2639e3db7226Sbellard     cpu_fprintf(f, "TB flush count      %d\n", tb_flush_count);
2640e3db7226Sbellard     cpu_fprintf(f, "TB invalidate count %d\n", tb_phys_invalidate_count);
2641e3db7226Sbellard     cpu_fprintf(f, "TLB flush count     %d\n", tlb_flush_count);
2642e3db7226Sbellard }
2643e3db7226Sbellard 
#if !defined(CONFIG_USER_ONLY)

/* Instantiate the softmmu load helpers used for code fetches (the
 * "_cmmu" variants).  softmmu_template.h expands to one helper per
 * access size, parameterized by the macros below:
 *   MMUSUFFIX            - name suffix for the generated helpers
 *   GETPC()              - no retaddr needed for code accesses
 *   env                  - templates reference a cpu env; map it to the
 *                          global cpu_single_env here
 *   SOFTMMU_CODE_ACCESS  - generate read-only (code-fetch) variants
 */
#define MMUSUFFIX _cmmu
#define GETPC() NULL
#define env cpu_single_env
#define SOFTMMU_CODE_ACCESS

/* SHIFT selects the access size: log2(bytes), so 0..3 = 1/2/4/8 bytes.
 * The template header is included once per size. */
#define SHIFT 0
#include "softmmu_template.h"

#define SHIFT 1
#include "softmmu_template.h"

#define SHIFT 2
#include "softmmu_template.h"

#define SHIFT 3
#include "softmmu_template.h"

/* stop aliasing 'env' to cpu_single_env for the rest of the file */
#undef env

#endif
2666