xref: /qemu/system/physmem.c (revision 706cd4b547db5c27585b6125a43663aba3404dfe)
154936004Sbellard /*
2fd6ce8f6Sbellard  *  virtual page mapping and translated block handling
354936004Sbellard  *
454936004Sbellard  *  Copyright (c) 2003 Fabrice Bellard
554936004Sbellard  *
654936004Sbellard  * This library is free software; you can redistribute it and/or
754936004Sbellard  * modify it under the terms of the GNU Lesser General Public
854936004Sbellard  * License as published by the Free Software Foundation; either
954936004Sbellard  * version 2 of the License, or (at your option) any later version.
1054936004Sbellard  *
1154936004Sbellard  * This library is distributed in the hope that it will be useful,
1254936004Sbellard  * but WITHOUT ANY WARRANTY; without even the implied warranty of
1354936004Sbellard  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
1454936004Sbellard  * Lesser General Public License for more details.
1554936004Sbellard  *
1654936004Sbellard  * You should have received a copy of the GNU Lesser General Public
1754936004Sbellard  * License along with this library; if not, write to the Free Software
1854936004Sbellard  * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
1954936004Sbellard  */
2067b915a5Sbellard #include "config.h"
21d5a8f07cSbellard #ifdef _WIN32
22d5a8f07cSbellard #include <windows.h>
23d5a8f07cSbellard #else
24a98d49b1Sbellard #include <sys/types.h>
25d5a8f07cSbellard #include <sys/mman.h>
26d5a8f07cSbellard #endif
2754936004Sbellard #include <stdlib.h>
2854936004Sbellard #include <stdio.h>
2954936004Sbellard #include <stdarg.h>
3054936004Sbellard #include <string.h>
3154936004Sbellard #include <errno.h>
3254936004Sbellard #include <unistd.h>
3354936004Sbellard #include <inttypes.h>
3454936004Sbellard 
356180a181Sbellard #include "cpu.h"
366180a181Sbellard #include "exec-all.h"
3753a5960aSpbrook #if defined(CONFIG_USER_ONLY)
3853a5960aSpbrook #include <qemu.h>
3953a5960aSpbrook #endif
4054936004Sbellard 
41fd6ce8f6Sbellard //#define DEBUG_TB_INVALIDATE
4266e85a21Sbellard //#define DEBUG_FLUSH
439fa3e853Sbellard //#define DEBUG_TLB
44fd6ce8f6Sbellard 
45fd6ce8f6Sbellard /* make various TB consistency checks */
46fd6ce8f6Sbellard //#define DEBUG_TB_CHECK
4798857888Sbellard //#define DEBUG_TLB_CHECK
48fd6ce8f6Sbellard 
49fd6ce8f6Sbellard /* threshold to flush the translated code buffer */
50fd6ce8f6Sbellard #define CODE_GEN_BUFFER_MAX_SIZE (CODE_GEN_BUFFER_SIZE - CODE_GEN_MAX_SIZE)
51fd6ce8f6Sbellard 
529fa3e853Sbellard #define SMC_BITMAP_USE_THRESHOLD 10
539fa3e853Sbellard 
549fa3e853Sbellard #define MMAP_AREA_START        0x00000000
559fa3e853Sbellard #define MMAP_AREA_END          0xa8000000
56fd6ce8f6Sbellard 
57108c49b8Sbellard #if defined(TARGET_SPARC64)
58108c49b8Sbellard #define TARGET_PHYS_ADDR_SPACE_BITS 41
59108c49b8Sbellard #elif defined(TARGET_PPC64)
60108c49b8Sbellard #define TARGET_PHYS_ADDR_SPACE_BITS 42
61108c49b8Sbellard #else
62108c49b8Sbellard /* Note: for compatibility with kqemu, we use 32 bits for x86_64 */
63108c49b8Sbellard #define TARGET_PHYS_ADDR_SPACE_BITS 32
64108c49b8Sbellard #endif
65108c49b8Sbellard 
66fd6ce8f6Sbellard TranslationBlock tbs[CODE_GEN_MAX_BLOCKS];
679fa3e853Sbellard TranslationBlock *tb_phys_hash[CODE_GEN_PHYS_HASH_SIZE];
68fd6ce8f6Sbellard int nb_tbs;
69eb51d102Sbellard /* any access to the tbs or the page table must use this lock */
70eb51d102Sbellard spinlock_t tb_lock = SPIN_LOCK_UNLOCKED;
71fd6ce8f6Sbellard 
72b8076a74Sbellard uint8_t code_gen_buffer[CODE_GEN_BUFFER_SIZE] __attribute__((aligned (32)));
73fd6ce8f6Sbellard uint8_t *code_gen_ptr;
74fd6ce8f6Sbellard 
759fa3e853Sbellard int phys_ram_size;
769fa3e853Sbellard int phys_ram_fd;
779fa3e853Sbellard uint8_t *phys_ram_base;
781ccde1cbSbellard uint8_t *phys_ram_dirty;
799fa3e853Sbellard 
806a00d601Sbellard CPUState *first_cpu;
816a00d601Sbellard /* current CPU in the current thread. It is only valid inside
826a00d601Sbellard    cpu_exec() */
836a00d601Sbellard CPUState *cpu_single_env;
846a00d601Sbellard 
/* Per-target-page translator bookkeeping, reached through l1_map via
   page_find()/page_find_alloc(). */
typedef struct PageDesc {
    /* list of TBs intersecting this ram page */
    TranslationBlock *first_tb;
    /* in order to optimize self modifying code, we count the number
       of lookups we do to a given page to use a bitmap */
    unsigned int code_write_count;
    /* built lazily by build_page_bitmap() once code_write_count reaches
       SMC_BITMAP_USE_THRESHOLD; freed by invalidate_page_bitmap() */
    uint8_t *code_bitmap;
#if defined(CONFIG_USER_ONLY)
    /* page protection flags (user-mode emulation only) */
    unsigned long flags;
#endif
} PageDesc;
9654936004Sbellard 
/* One leaf entry of the physical page table built by
   phys_page_find_alloc(). */
typedef struct PhysPageDesc {
    /* offset in host memory of the page + io_index in the low 12 bits */
    uint32_t phys_offset;
} PhysPageDesc;
10192e873b9Sbellard 
10254936004Sbellard #define L2_BITS 10
10354936004Sbellard #define L1_BITS (32 - L2_BITS - TARGET_PAGE_BITS)
10454936004Sbellard 
10554936004Sbellard #define L1_SIZE (1 << L1_BITS)
10654936004Sbellard #define L2_SIZE (1 << L2_BITS)
10754936004Sbellard 
10833417e70Sbellard static void io_mem_init(void);
109fd6ce8f6Sbellard 
11083fb7adfSbellard unsigned long qemu_real_host_page_size;
11183fb7adfSbellard unsigned long qemu_host_page_bits;
11283fb7adfSbellard unsigned long qemu_host_page_size;
11383fb7adfSbellard unsigned long qemu_host_page_mask;
11454936004Sbellard 
11592e873b9Sbellard /* XXX: for system emulation, it could just be an array */
11654936004Sbellard static PageDesc *l1_map[L1_SIZE];
1170a962c02Sbellard PhysPageDesc **l1_phys_map;
11854936004Sbellard 
11933417e70Sbellard /* io memory support */
12033417e70Sbellard CPUWriteMemoryFunc *io_mem_write[IO_MEM_NB_ENTRIES][4];
12133417e70Sbellard CPUReadMemoryFunc *io_mem_read[IO_MEM_NB_ENTRIES][4];
122a4193c8aSbellard void *io_mem_opaque[IO_MEM_NB_ENTRIES];
12333417e70Sbellard static int io_mem_nb;
12433417e70Sbellard 
12534865134Sbellard /* log support */
12634865134Sbellard char *logfilename = "/tmp/qemu.log";
12734865134Sbellard FILE *logfile;
12834865134Sbellard int loglevel;
12934865134Sbellard 
130e3db7226Sbellard /* statistics */
131e3db7226Sbellard static int tlb_flush_count;
132e3db7226Sbellard static int tb_flush_count;
133e3db7226Sbellard static int tb_phys_invalidate_count;
134e3db7226Sbellard 
/* One-time init: discover the host page size, make the static code
   generation buffer executable, derive the qemu_host_page_* globals and
   allocate the top level of the physical page table. */
static void page_init(void)
{
    /* NOTE: we can always suppose that qemu_host_page_size >=
       TARGET_PAGE_SIZE */
#ifdef _WIN32
    {
        SYSTEM_INFO system_info;
        DWORD old_protect;

        GetSystemInfo(&system_info);
        qemu_real_host_page_size = system_info.dwPageSize;

        /* allow execution of generated code in the static buffer */
        VirtualProtect(code_gen_buffer, sizeof(code_gen_buffer),
                       PAGE_EXECUTE_READWRITE, &old_protect);
    }
#else
    qemu_real_host_page_size = getpagesize();
    {
        unsigned long start, end;

        /* round the buffer range out to host page boundaries, since
           mprotect() only operates on whole pages */
        start = (unsigned long)code_gen_buffer;
        start &= ~(qemu_real_host_page_size - 1);

        end = (unsigned long)code_gen_buffer + sizeof(code_gen_buffer);
        end += qemu_real_host_page_size - 1;
        end &= ~(qemu_real_host_page_size - 1);

        mprotect((void *)start, end - start,
                 PROT_READ | PROT_WRITE | PROT_EXEC);
    }
#endif

    /* qemu_host_page_size may have been forced earlier; otherwise use
       the real host page size, but never less than the target's */
    if (qemu_host_page_size == 0)
        qemu_host_page_size = qemu_real_host_page_size;
    if (qemu_host_page_size < TARGET_PAGE_SIZE)
        qemu_host_page_size = TARGET_PAGE_SIZE;
    qemu_host_page_bits = 0;
    while ((1 << qemu_host_page_bits) < qemu_host_page_size)
        qemu_host_page_bits++;
    qemu_host_page_mask = ~(qemu_host_page_size - 1);
    /* top level of the physical page table (zeroed = nothing mapped) */
    l1_phys_map = qemu_vmalloc(L1_SIZE * sizeof(void *));
    memset(l1_phys_map, 0, L1_SIZE * sizeof(void *));
}
17854936004Sbellard 
179fd6ce8f6Sbellard static inline PageDesc *page_find_alloc(unsigned int index)
18054936004Sbellard {
18154936004Sbellard     PageDesc **lp, *p;
18254936004Sbellard 
18354936004Sbellard     lp = &l1_map[index >> L2_BITS];
18454936004Sbellard     p = *lp;
18554936004Sbellard     if (!p) {
18654936004Sbellard         /* allocate if not found */
18759817ccbSbellard         p = qemu_malloc(sizeof(PageDesc) * L2_SIZE);
188fd6ce8f6Sbellard         memset(p, 0, sizeof(PageDesc) * L2_SIZE);
18954936004Sbellard         *lp = p;
19054936004Sbellard     }
19154936004Sbellard     return p + (index & (L2_SIZE - 1));
19254936004Sbellard }
19354936004Sbellard 
194fd6ce8f6Sbellard static inline PageDesc *page_find(unsigned int index)
19554936004Sbellard {
19654936004Sbellard     PageDesc *p;
19754936004Sbellard 
19854936004Sbellard     p = l1_map[index >> L2_BITS];
19954936004Sbellard     if (!p)
20054936004Sbellard         return 0;
201fd6ce8f6Sbellard     return p + (index & (L2_SIZE - 1));
20254936004Sbellard }
20354936004Sbellard 
/* Walk (and optionally grow) the physical page table.  The table is one
   level (L1 of void* -> L2 of PhysPageDesc) for 32-bit physical address
   spaces, with an extra top level compiled in when
   TARGET_PHYS_ADDR_SPACE_BITS > 32.  Returns the descriptor for
   'index', or NULL if absent and 'alloc' is false. */
static PhysPageDesc *phys_page_find_alloc(target_phys_addr_t index, int alloc)
{
    void **lp, **p;

    p = (void **)l1_phys_map;
#if TARGET_PHYS_ADDR_SPACE_BITS > 32

#if TARGET_PHYS_ADDR_SPACE_BITS > (32 + L1_BITS)
#error unsupported TARGET_PHYS_ADDR_SPACE_BITS
#endif
    /* extra top level indexed by the bits above L1+L2 */
    lp = p + ((index >> (L1_BITS + L2_BITS)) & (L1_SIZE - 1));
    p = *lp;
    if (!p) {
        /* allocate if not found */
        if (!alloc)
            return NULL;
        p = qemu_vmalloc(sizeof(void *) * L1_SIZE);
        memset(p, 0, sizeof(void *) * L1_SIZE);
        *lp = p;
    }
#endif
    /* second-to-last level: array of pointers to PhysPageDesc tables */
    lp = p + ((index >> L2_BITS) & (L1_SIZE - 1));
    p = *lp;
    if (!p) {
        /* allocate if not found */
        if (!alloc)
            return NULL;
        p = qemu_vmalloc(sizeof(PhysPageDesc) * L2_SIZE);
        memset(p, 0, sizeof(PhysPageDesc) * L2_SIZE);
        *lp = p;
    }
    return ((PhysPageDesc *)p) + (index & (L2_SIZE - 1));
}
23792e873b9Sbellard 
238108c49b8Sbellard static inline PhysPageDesc *phys_page_find(target_phys_addr_t index)
23992e873b9Sbellard {
240108c49b8Sbellard     return phys_page_find_alloc(index, 0);
24192e873b9Sbellard }
24292e873b9Sbellard 
2439fa3e853Sbellard #if !defined(CONFIG_USER_ONLY)
2446a00d601Sbellard static void tlb_protect_code(ram_addr_t ram_addr);
2453a7d929eSbellard static void tlb_unprotect_code_phys(CPUState *env, ram_addr_t ram_addr,
2463a7d929eSbellard                                     target_ulong vaddr);
2479fa3e853Sbellard #endif
248fd6ce8f6Sbellard 
2496a00d601Sbellard void cpu_exec_init(CPUState *env)
250fd6ce8f6Sbellard {
2516a00d601Sbellard     CPUState **penv;
2526a00d601Sbellard     int cpu_index;
2536a00d601Sbellard 
254fd6ce8f6Sbellard     if (!code_gen_ptr) {
255fd6ce8f6Sbellard         code_gen_ptr = code_gen_buffer;
256b346ff46Sbellard         page_init();
25733417e70Sbellard         io_mem_init();
258fd6ce8f6Sbellard     }
2596a00d601Sbellard     env->next_cpu = NULL;
2606a00d601Sbellard     penv = &first_cpu;
2616a00d601Sbellard     cpu_index = 0;
2626a00d601Sbellard     while (*penv != NULL) {
2636a00d601Sbellard         penv = (CPUState **)&(*penv)->next_cpu;
2646a00d601Sbellard         cpu_index++;
2656a00d601Sbellard     }
2666a00d601Sbellard     env->cpu_index = cpu_index;
2676a00d601Sbellard     *penv = env;
268fd6ce8f6Sbellard }
269fd6ce8f6Sbellard 
2709fa3e853Sbellard static inline void invalidate_page_bitmap(PageDesc *p)
2719fa3e853Sbellard {
2729fa3e853Sbellard     if (p->code_bitmap) {
27359817ccbSbellard         qemu_free(p->code_bitmap);
2749fa3e853Sbellard         p->code_bitmap = NULL;
2759fa3e853Sbellard     }
2769fa3e853Sbellard     p->code_write_count = 0;
2779fa3e853Sbellard }
2789fa3e853Sbellard 
279fd6ce8f6Sbellard /* set to NULL all the 'first_tb' fields in all PageDescs */
280fd6ce8f6Sbellard static void page_flush_tb(void)
281fd6ce8f6Sbellard {
282fd6ce8f6Sbellard     int i, j;
283fd6ce8f6Sbellard     PageDesc *p;
284fd6ce8f6Sbellard 
285fd6ce8f6Sbellard     for(i = 0; i < L1_SIZE; i++) {
286fd6ce8f6Sbellard         p = l1_map[i];
287fd6ce8f6Sbellard         if (p) {
2889fa3e853Sbellard             for(j = 0; j < L2_SIZE; j++) {
2899fa3e853Sbellard                 p->first_tb = NULL;
2909fa3e853Sbellard                 invalidate_page_bitmap(p);
2919fa3e853Sbellard                 p++;
2929fa3e853Sbellard             }
293fd6ce8f6Sbellard         }
294fd6ce8f6Sbellard     }
295fd6ce8f6Sbellard }
296fd6ce8f6Sbellard 
297fd6ce8f6Sbellard /* flush all the translation blocks */
298d4e8164fSbellard /* XXX: tb_flush is currently not thread safe */
/* Discard every translated block and recycle the whole code buffer.
   All CPUs' jump caches, the physical hash table and the per-page TB
   lists are cleared.  'env1' is unused — every CPU is reset. */
void tb_flush(CPUState *env1)
{
    CPUState *env;
#if defined(DEBUG_FLUSH)
    printf("qemu: flush code_size=%d nb_tbs=%d avg_tb_size=%d\n",
           code_gen_ptr - code_gen_buffer,
           nb_tbs,
           nb_tbs > 0 ? (code_gen_ptr - code_gen_buffer) / nb_tbs : 0);
#endif
    nb_tbs = 0;

    /* invalidate every CPU's virtual-PC -> TB lookup cache */
    for(env = first_cpu; env != NULL; env = env->next_cpu) {
        memset (env->tb_jmp_cache, 0, TB_JMP_CACHE_SIZE * sizeof (void *));
    }

    /* empty the physical-PC hash table and all per-page TB lists */
    memset (tb_phys_hash, 0, CODE_GEN_PHYS_HASH_SIZE * sizeof (void *));
    page_flush_tb();

    /* all generated code is now unreachable: restart from the buffer base */
    code_gen_ptr = code_gen_buffer;
    /* XXX: flush processor icache at this point if cache flush is
       expensive */
    tb_flush_count++;
}
322fd6ce8f6Sbellard 
323fd6ce8f6Sbellard #ifdef DEBUG_TB_CHECK
324fd6ce8f6Sbellard 
/* Debug check (DEBUG_TB_CHECK): verify that no TB still intersects the
   target page containing 'address' after an invalidation.
   NOTE(review): iterates tb_hash/CODE_GEN_HASH_SIZE, which are not
   defined in this file — presumably a virtual-PC hash table declared
   elsewhere; confirm this debug path still builds. */
static void tb_invalidate_check(unsigned long address)
{
    TranslationBlock *tb;
    int i;
    address &= TARGET_PAGE_MASK;
    for(i = 0;i < CODE_GEN_HASH_SIZE; i++) {
        for(tb = tb_hash[i]; tb != NULL; tb = tb->hash_next) {
            /* report any TB overlapping [address, address + page) */
            if (!(address + TARGET_PAGE_SIZE <= tb->pc ||
                  address >= tb->pc + tb->size)) {
                printf("ERROR invalidate: address=%08lx PC=%08lx size=%04x\n",
                       address, tb->pc, tb->size);
            }
        }
    }
}
340fd6ce8f6Sbellard 
341fd6ce8f6Sbellard /* verify that all the pages have correct rights for code */
/* Debug check (DEBUG_TB_CHECK): verify that no page spanned by a live
   TB is writable — translated code must live in write-protected pages.
   NOTE(review): like tb_invalidate_check(), this walks tb_hash, which
   is not defined in this file; confirm it still exists. */
static void tb_page_check(void)
{
    TranslationBlock *tb;
    int i, flags1, flags2;

    for(i = 0;i < CODE_GEN_HASH_SIZE; i++) {
        for(tb = tb_hash[i]; tb != NULL; tb = tb->hash_next) {
            /* check both the first and the last page of the TB */
            flags1 = page_get_flags(tb->pc);
            flags2 = page_get_flags(tb->pc + tb->size - 1);
            if ((flags1 & PAGE_WRITE) || (flags2 & PAGE_WRITE)) {
                printf("ERROR page flags: PC=%08lx size=%04x f1=%x f2=%x\n",
                       tb->pc, tb->size, flags1, flags2);
            }
        }
    }
}
358fd6ce8f6Sbellard 
/* Debug check: follow the circular list of incoming jumps starting at
   tb->jmp_first and verify it terminates back at 'tb'.  Entries are
   tagged pointers: the low 2 bits select the jmp_next[] slot of the
   pointed-to TB; tag value 2 marks the list head. */
void tb_jmp_check(TranslationBlock *tb)
{
    TranslationBlock *tb1;
    unsigned int n1;

    /* suppress any remaining jumps to this TB */
    tb1 = tb->jmp_first;
    for(;;) {
        n1 = (long)tb1 & 3;
        tb1 = (TranslationBlock *)((long)tb1 & ~3);
        if (n1 == 2)
            break;  /* tag 2: reached the end-of-list marker */
        tb1 = tb1->jmp_next[n1];
    }
    /* check end of list */
    if (tb1 != tb) {
        printf("ERROR: jmp_list from 0x%08lx\n", (long)tb);
    }
}
378d4e8164fSbellard 
379fd6ce8f6Sbellard #endif
380fd6ce8f6Sbellard 
381fd6ce8f6Sbellard /* invalidate one TB */
382fd6ce8f6Sbellard static inline void tb_remove(TranslationBlock **ptb, TranslationBlock *tb,
383fd6ce8f6Sbellard                              int next_offset)
384fd6ce8f6Sbellard {
385fd6ce8f6Sbellard     TranslationBlock *tb1;
386fd6ce8f6Sbellard     for(;;) {
387fd6ce8f6Sbellard         tb1 = *ptb;
388fd6ce8f6Sbellard         if (tb1 == tb) {
389fd6ce8f6Sbellard             *ptb = *(TranslationBlock **)((char *)tb1 + next_offset);
390fd6ce8f6Sbellard             break;
391fd6ce8f6Sbellard         }
392fd6ce8f6Sbellard         ptb = (TranslationBlock **)((char *)tb1 + next_offset);
393fd6ce8f6Sbellard     }
394fd6ce8f6Sbellard }
395fd6ce8f6Sbellard 
3969fa3e853Sbellard static inline void tb_page_remove(TranslationBlock **ptb, TranslationBlock *tb)
3979fa3e853Sbellard {
3989fa3e853Sbellard     TranslationBlock *tb1;
3999fa3e853Sbellard     unsigned int n1;
4009fa3e853Sbellard 
4019fa3e853Sbellard     for(;;) {
4029fa3e853Sbellard         tb1 = *ptb;
4039fa3e853Sbellard         n1 = (long)tb1 & 3;
4049fa3e853Sbellard         tb1 = (TranslationBlock *)((long)tb1 & ~3);
4059fa3e853Sbellard         if (tb1 == tb) {
4069fa3e853Sbellard             *ptb = tb1->page_next[n1];
4079fa3e853Sbellard             break;
4089fa3e853Sbellard         }
4099fa3e853Sbellard         ptb = &tb1->page_next[n1];
4109fa3e853Sbellard     }
4119fa3e853Sbellard }
4129fa3e853Sbellard 
/* Remove the outgoing jump slot 'n' of 'tb' from the circular list of
   jumps targeting its destination TB.  List entries are tagged
   pointers: the low 2 bits select the jmp_next[] slot; tag 2 marks the
   destination TB's own list head (jmp_first). */
static inline void tb_jmp_remove(TranslationBlock *tb, int n)
{
    TranslationBlock *tb1, **ptb;
    unsigned int n1;

    ptb = &tb->jmp_next[n];
    tb1 = *ptb;
    if (tb1) {
        /* find tb(n) in circular list */
        for(;;) {
            tb1 = *ptb;
            n1 = (long)tb1 & 3;
            tb1 = (TranslationBlock *)((long)tb1 & ~3);
            if (n1 == n && tb1 == tb)
                break;
            if (n1 == 2) {
                /* reached the destination TB: continue via its head */
                ptb = &tb1->jmp_first;
            } else {
                ptb = &tb1->jmp_next[n1];
            }
        }
        /* now we can suppress tb(n) from the list */
        *ptb = tb->jmp_next[n];

        tb->jmp_next[n] = NULL;
    }
}
440d4e8164fSbellard 
441d4e8164fSbellard /* reset the jump entry 'n' of a TB so that it is not chained to
442d4e8164fSbellard    another TB */
443d4e8164fSbellard static inline void tb_reset_jump(TranslationBlock *tb, int n)
444d4e8164fSbellard {
445d4e8164fSbellard     tb_set_jmp_target(tb, n, (unsigned long)(tb->tc_ptr + tb->tb_next_offset[n]));
446d4e8164fSbellard }
447d4e8164fSbellard 
/* Remove a TB from every structure that can reach it: the physical hash
   table, the per-page TB lists, each CPU's tb_jmp_cache, and both jump
   chains.  Pages equal to 'page_addr' are skipped — the caller is
   handling that page's list itself; pass -1 to unlink from all pages. */
static inline void tb_phys_invalidate(TranslationBlock *tb, unsigned int page_addr)
{
    CPUState *env;
    PageDesc *p;
    unsigned int h, n1;
    target_ulong phys_pc;
    TranslationBlock *tb1, *tb2;

    /* remove the TB from the hash list */
    phys_pc = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
    h = tb_phys_hash_func(phys_pc);
    tb_remove(&tb_phys_hash[h], tb,
              offsetof(TranslationBlock, phys_hash_next));

    /* remove the TB from the page list */
    if (tb->page_addr[0] != page_addr) {
        p = page_find(tb->page_addr[0] >> TARGET_PAGE_BITS);
        tb_page_remove(&p->first_tb, tb);
        invalidate_page_bitmap(p);
    }
    /* page_addr[1] == -1 means the TB does not span a second page */
    if (tb->page_addr[1] != -1 && tb->page_addr[1] != page_addr) {
        p = page_find(tb->page_addr[1] >> TARGET_PAGE_BITS);
        tb_page_remove(&p->first_tb, tb);
        invalidate_page_bitmap(p);
    }

    tb_invalidated_flag = 1;

    /* remove the TB from every CPU's virtual-PC lookup cache */
    h = tb_jmp_cache_hash_func(tb->pc);
    for(env = first_cpu; env != NULL; env = env->next_cpu) {
        if (env->tb_jmp_cache[h] == tb)
            env->tb_jmp_cache[h] = NULL;
    }

    /* suppress this TB from the two jump lists */
    tb_jmp_remove(tb, 0);
    tb_jmp_remove(tb, 1);

    /* suppress any remaining jumps to this TB: walk the tagged circular
       list (low 2 bits = slot, 2 = head) and un-chain each jumper */
    tb1 = tb->jmp_first;
    for(;;) {
        n1 = (long)tb1 & 3;
        if (n1 == 2)
            break;
        tb1 = (TranslationBlock *)((long)tb1 & ~3);
        tb2 = tb1->jmp_next[n1];
        tb_reset_jump(tb1, n1);
        tb1->jmp_next[n1] = NULL;
        tb1 = tb2;
    }
    tb->jmp_first = (TranslationBlock *)((long)tb | 2); /* fail safe */

    tb_phys_invalidate_count++;
}
5039fa3e853Sbellard 
/* Set 'len' consecutive bits starting at bit index 'start' in the
   bitmap 'tab' (LSB-first within each byte).  Existing bits outside
   the range are preserved. */
static inline void set_bits(uint8_t *tab, int start, int len)
{
    int end = start + len;
    uint8_t *byte = tab + (start >> 3);
    int head_mask = 0xff << (start & 7);

    if ((start >> 3) == (end >> 3)) {
        /* the whole run fits inside a single byte */
        if (start < end)
            *byte |= head_mask & ~(0xff << (end & 7));
    } else {
        int pos;

        /* partial leading byte */
        *byte++ |= head_mask;
        /* fill the intermediate bytes completely */
        for (pos = (start + 8) & ~7; pos + 8 <= end; pos += 8)
            *byte++ = 0xff;
        /* partial trailing byte, if any */
        if (pos < end)
            *byte |= ~(0xff << (end & 7));
    }
}
5309fa3e853Sbellard 
/* Build the self-modifying-code bitmap for a page: one bit per byte of
   the page, set where translated code originated.  Lets the SMC write
   path skip TB invalidation for writes that hit no translated code.
   Silently gives up if the allocation fails (bitmap stays NULL). */
static void build_page_bitmap(PageDesc *p)
{
    int n, tb_start, tb_end;
    TranslationBlock *tb;

    p->code_bitmap = qemu_malloc(TARGET_PAGE_SIZE / 8);
    if (!p->code_bitmap)
        return;
    memset(p->code_bitmap, 0, TARGET_PAGE_SIZE / 8);

    /* walk the tagged TB list: low 2 bits of each link select which
       page_next[] slot continues the chain */
    tb = p->first_tb;
    while (tb != NULL) {
        n = (long)tb & 3;
        tb = (TranslationBlock *)((long)tb & ~3);
        /* NOTE: this is subtle as a TB may span two physical pages */
        if (n == 0) {
            /* NOTE: tb_end may be after the end of the page, but
               it is not a problem */
            tb_start = tb->pc & ~TARGET_PAGE_MASK;
            tb_end = tb_start + tb->size;
            if (tb_end > TARGET_PAGE_SIZE)
                tb_end = TARGET_PAGE_SIZE;
        } else {
            /* second page of a spanning TB: bits start at offset 0 */
            tb_start = 0;
            tb_end = ((tb->pc + tb->size) & ~TARGET_PAGE_MASK);
        }
        set_bits(p->code_bitmap, tb_start, tb_end - tb_start);
        tb = tb->page_next[n];
    }
}
5619fa3e853Sbellard 
562d720b93dSbellard #ifdef TARGET_HAS_PRECISE_SMC
563d720b93dSbellard 
/* Translate one basic block starting at guest 'pc' and register it in
   the physical hash and page tables.  'cs_base', 'flags' and 'cflags'
   become part of the TB lookup key.  Used by the precise-SMC path to
   regenerate the current TB.  May flush all TBs if the pool is full. */
static void tb_gen_code(CPUState *env,
                        target_ulong pc, target_ulong cs_base, int flags,
                        int cflags)
{
    TranslationBlock *tb;
    uint8_t *tc_ptr;
    target_ulong phys_pc, phys_page2, virt_page2;
    int code_gen_size;

    phys_pc = get_phys_addr_code(env, pc);
    tb = tb_alloc(pc);
    if (!tb) {
        /* flush must be done */
        tb_flush(env);
        /* cannot fail at this point */
        tb = tb_alloc(pc);
    }
    tc_ptr = code_gen_ptr;
    tb->tc_ptr = tc_ptr;
    tb->cs_base = cs_base;
    tb->flags = flags;
    tb->cflags = cflags;
    cpu_gen_code(env, tb, CODE_GEN_MAX_SIZE, &code_gen_size);
    /* round the buffer pointer up so the next TB starts aligned */
    code_gen_ptr = (void *)(((unsigned long)code_gen_ptr + code_gen_size + CODE_GEN_ALIGN - 1) & ~(CODE_GEN_ALIGN - 1));

    /* check next page if needed */
    virt_page2 = (pc + tb->size - 1) & TARGET_PAGE_MASK;
    phys_page2 = -1;
    if ((pc & TARGET_PAGE_MASK) != virt_page2) {
        /* TB spills into a second page: record that page's address too */
        phys_page2 = get_phys_addr_code(env, virt_page2);
    }
    tb_link_phys(tb, phys_pc, phys_page2);
}
597d720b93dSbellard #endif
598d720b93dSbellard 
5999fa3e853Sbellard /* invalidate all TBs which intersect with the target physical page
6009fa3e853Sbellard    starting in range [start;end[. NOTE: start and end must refer to
601d720b93dSbellard    the same physical page. 'is_cpu_write_access' should be true if called
602d720b93dSbellard    from a real cpu write access: the virtual CPU will exit the current
603d720b93dSbellard    TB if code is modified inside this TB. */
604d720b93dSbellard void tb_invalidate_phys_page_range(target_ulong start, target_ulong end,
605d720b93dSbellard                                    int is_cpu_write_access)
6069fa3e853Sbellard {
607d720b93dSbellard     int n, current_tb_modified, current_tb_not_found, current_flags;
608d720b93dSbellard     CPUState *env = cpu_single_env;
6099fa3e853Sbellard     PageDesc *p;
610ea1c1802Sbellard     TranslationBlock *tb, *tb_next, *current_tb, *saved_tb;
6119fa3e853Sbellard     target_ulong tb_start, tb_end;
612d720b93dSbellard     target_ulong current_pc, current_cs_base;
6139fa3e853Sbellard 
6149fa3e853Sbellard     p = page_find(start >> TARGET_PAGE_BITS);
6159fa3e853Sbellard     if (!p)
6169fa3e853Sbellard         return;
6179fa3e853Sbellard     if (!p->code_bitmap &&
618d720b93dSbellard         ++p->code_write_count >= SMC_BITMAP_USE_THRESHOLD &&
619d720b93dSbellard         is_cpu_write_access) {
6209fa3e853Sbellard         /* build code bitmap */
6219fa3e853Sbellard         build_page_bitmap(p);
6229fa3e853Sbellard     }
6239fa3e853Sbellard 
6249fa3e853Sbellard     /* we remove all the TBs in the range [start, end[ */
6259fa3e853Sbellard     /* XXX: see if in some cases it could be faster to invalidate all the code */
626d720b93dSbellard     current_tb_not_found = is_cpu_write_access;
627d720b93dSbellard     current_tb_modified = 0;
628d720b93dSbellard     current_tb = NULL; /* avoid warning */
629d720b93dSbellard     current_pc = 0; /* avoid warning */
630d720b93dSbellard     current_cs_base = 0; /* avoid warning */
631d720b93dSbellard     current_flags = 0; /* avoid warning */
6329fa3e853Sbellard     tb = p->first_tb;
6339fa3e853Sbellard     while (tb != NULL) {
6349fa3e853Sbellard         n = (long)tb & 3;
6359fa3e853Sbellard         tb = (TranslationBlock *)((long)tb & ~3);
6369fa3e853Sbellard         tb_next = tb->page_next[n];
6379fa3e853Sbellard         /* NOTE: this is subtle as a TB may span two physical pages */
6389fa3e853Sbellard         if (n == 0) {
6399fa3e853Sbellard             /* NOTE: tb_end may be after the end of the page, but
6409fa3e853Sbellard                it is not a problem */
6419fa3e853Sbellard             tb_start = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
6429fa3e853Sbellard             tb_end = tb_start + tb->size;
6439fa3e853Sbellard         } else {
6449fa3e853Sbellard             tb_start = tb->page_addr[1];
6459fa3e853Sbellard             tb_end = tb_start + ((tb->pc + tb->size) & ~TARGET_PAGE_MASK);
6469fa3e853Sbellard         }
6479fa3e853Sbellard         if (!(tb_end <= start || tb_start >= end)) {
648d720b93dSbellard #ifdef TARGET_HAS_PRECISE_SMC
649d720b93dSbellard             if (current_tb_not_found) {
650d720b93dSbellard                 current_tb_not_found = 0;
651d720b93dSbellard                 current_tb = NULL;
652d720b93dSbellard                 if (env->mem_write_pc) {
653d720b93dSbellard                     /* now we have a real cpu fault */
654d720b93dSbellard                     current_tb = tb_find_pc(env->mem_write_pc);
655d720b93dSbellard                 }
656d720b93dSbellard             }
657d720b93dSbellard             if (current_tb == tb &&
658d720b93dSbellard                 !(current_tb->cflags & CF_SINGLE_INSN)) {
659d720b93dSbellard                 /* If we are modifying the current TB, we must stop
660d720b93dSbellard                 its execution. We could be more precise by checking
661d720b93dSbellard                 that the modification is after the current PC, but it
662d720b93dSbellard                 would require a specialized function to partially
663d720b93dSbellard                 restore the CPU state */
664d720b93dSbellard 
665d720b93dSbellard                 current_tb_modified = 1;
666d720b93dSbellard                 cpu_restore_state(current_tb, env,
667d720b93dSbellard                                   env->mem_write_pc, NULL);
668d720b93dSbellard #if defined(TARGET_I386)
669d720b93dSbellard                 current_flags = env->hflags;
670d720b93dSbellard                 current_flags |= (env->eflags & (IOPL_MASK | TF_MASK | VM_MASK));
671d720b93dSbellard                 current_cs_base = (target_ulong)env->segs[R_CS].base;
672d720b93dSbellard                 current_pc = current_cs_base + env->eip;
673d720b93dSbellard #else
674d720b93dSbellard #error unsupported CPU
675d720b93dSbellard #endif
676d720b93dSbellard             }
677d720b93dSbellard #endif /* TARGET_HAS_PRECISE_SMC */
6786f5a9f7eSbellard             /* we need to do that to handle the case where a signal
6796f5a9f7eSbellard                occurs while doing tb_phys_invalidate() */
6806f5a9f7eSbellard             saved_tb = NULL;
6816f5a9f7eSbellard             if (env) {
682ea1c1802Sbellard                 saved_tb = env->current_tb;
683ea1c1802Sbellard                 env->current_tb = NULL;
6846f5a9f7eSbellard             }
6859fa3e853Sbellard             tb_phys_invalidate(tb, -1);
6866f5a9f7eSbellard             if (env) {
687ea1c1802Sbellard                 env->current_tb = saved_tb;
688ea1c1802Sbellard                 if (env->interrupt_request && env->current_tb)
689ea1c1802Sbellard                     cpu_interrupt(env, env->interrupt_request);
6909fa3e853Sbellard             }
6916f5a9f7eSbellard         }
6929fa3e853Sbellard         tb = tb_next;
6939fa3e853Sbellard     }
6949fa3e853Sbellard #if !defined(CONFIG_USER_ONLY)
6959fa3e853Sbellard     /* if no code remaining, no need to continue to use slow writes */
6969fa3e853Sbellard     if (!p->first_tb) {
6979fa3e853Sbellard         invalidate_page_bitmap(p);
698d720b93dSbellard         if (is_cpu_write_access) {
699d720b93dSbellard             tlb_unprotect_code_phys(env, start, env->mem_write_vaddr);
700d720b93dSbellard         }
701d720b93dSbellard     }
702d720b93dSbellard #endif
703d720b93dSbellard #ifdef TARGET_HAS_PRECISE_SMC
704d720b93dSbellard     if (current_tb_modified) {
705d720b93dSbellard         /* we generate a block containing just the instruction
706d720b93dSbellard            modifying the memory. It will ensure that it cannot modify
707d720b93dSbellard            itself */
708ea1c1802Sbellard         env->current_tb = NULL;
709d720b93dSbellard         tb_gen_code(env, current_pc, current_cs_base, current_flags,
710d720b93dSbellard                     CF_SINGLE_INSN);
711d720b93dSbellard         cpu_resume_from_signal(env, NULL);
7129fa3e853Sbellard     }
7139fa3e853Sbellard #endif
7149fa3e853Sbellard }
7159fa3e853Sbellard 
7169fa3e853Sbellard /* len must be <= 8 and start must be a multiple of len */
717d720b93dSbellard static inline void tb_invalidate_phys_page_fast(target_ulong start, int len)
7189fa3e853Sbellard {
7199fa3e853Sbellard     PageDesc *p;
7209fa3e853Sbellard     int offset, b;
72159817ccbSbellard #if 0
722a4193c8aSbellard     if (1) {
723a4193c8aSbellard         if (loglevel) {
724a4193c8aSbellard             fprintf(logfile, "modifying code at 0x%x size=%d EIP=%x PC=%08x\n",
725a4193c8aSbellard                    cpu_single_env->mem_write_vaddr, len,
726a4193c8aSbellard                    cpu_single_env->eip,
727a4193c8aSbellard                    cpu_single_env->eip + (long)cpu_single_env->segs[R_CS].base);
728a4193c8aSbellard         }
72959817ccbSbellard     }
73059817ccbSbellard #endif
7319fa3e853Sbellard     p = page_find(start >> TARGET_PAGE_BITS);
7329fa3e853Sbellard     if (!p)
7339fa3e853Sbellard         return;
7349fa3e853Sbellard     if (p->code_bitmap) {
7359fa3e853Sbellard         offset = start & ~TARGET_PAGE_MASK;
7369fa3e853Sbellard         b = p->code_bitmap[offset >> 3] >> (offset & 7);
7379fa3e853Sbellard         if (b & ((1 << len) - 1))
7389fa3e853Sbellard             goto do_invalidate;
7399fa3e853Sbellard     } else {
7409fa3e853Sbellard     do_invalidate:
741d720b93dSbellard         tb_invalidate_phys_page_range(start, start + len, 1);
7429fa3e853Sbellard     }
7439fa3e853Sbellard }
7449fa3e853Sbellard 
7459fa3e853Sbellard #if !defined(CONFIG_SOFTMMU)
/* Invalidate all TBs covering the single (virtual) page containing
   'addr'.  'pc' is the host PC of the faulting write (0 if unknown)
   and 'puc' the signal context; both are used only for precise
   self-modifying-code recovery.  Built only without softmmu. */
static void tb_invalidate_phys_page(target_ulong addr,
                                    unsigned long pc, void *puc)
{
    int n, current_flags, current_tb_modified;
    target_ulong current_pc, current_cs_base;
    PageDesc *p;
    TranslationBlock *tb, *current_tb;
#ifdef TARGET_HAS_PRECISE_SMC
    CPUState *env = cpu_single_env;
#endif

    addr &= TARGET_PAGE_MASK;
    p = page_find(addr >> TARGET_PAGE_BITS);
    if (!p)
        return;
    tb = p->first_tb;
    current_tb_modified = 0;
    current_tb = NULL;
    current_pc = 0; /* avoid warning */
    current_cs_base = 0; /* avoid warning */
    current_flags = 0; /* avoid warning */
#ifdef TARGET_HAS_PRECISE_SMC
    if (tb && pc != 0) {
        /* the write came from generated code: find which TB did it */
        current_tb = tb_find_pc(pc);
    }
#endif
    while (tb != NULL) {
        /* the low 2 bits of each list link encode which of the TB's
           (up to) two pages the link belongs to */
        n = (long)tb & 3;
        tb = (TranslationBlock *)((long)tb & ~3);
#ifdef TARGET_HAS_PRECISE_SMC
        if (current_tb == tb &&
            !(current_tb->cflags & CF_SINGLE_INSN)) {
                /* If we are modifying the current TB, we must stop
                   its execution. We could be more precise by checking
                   that the modification is after the current PC, but it
                   would require a specialized function to partially
                   restore the CPU state */

            current_tb_modified = 1;
            cpu_restore_state(current_tb, env, pc, puc);
#if defined(TARGET_I386)
            /* recompute the translation key (pc/cs_base/flags) of the
               interrupted instruction for the regeneration below */
            current_flags = env->hflags;
            current_flags |= (env->eflags & (IOPL_MASK | TF_MASK | VM_MASK));
            current_cs_base = (target_ulong)env->segs[R_CS].base;
            current_pc = current_cs_base + env->eip;
#else
#error unsupported CPU
#endif
        }
#endif /* TARGET_HAS_PRECISE_SMC */
        tb_phys_invalidate(tb, addr);
        tb = tb->page_next[n];
    }
    p->first_tb = NULL;
#ifdef TARGET_HAS_PRECISE_SMC
    if (current_tb_modified) {
        /* we generate a block containing just the instruction
           modifying the memory. It will ensure that it cannot modify
           itself */
        env->current_tb = NULL;
        tb_gen_code(env, current_pc, current_cs_base, current_flags,
                    CF_SINGLE_INSN);
        cpu_resume_from_signal(env, puc);
    }
#endif
}
8129fa3e853Sbellard #endif
813fd6ce8f6Sbellard 
/* add the tb in the target page and protect it if necessary */
/* 'n' selects which of the TB's two pages (0 or 1) is being linked;
   'page_addr' is the physical address of that page. */
static inline void tb_alloc_page(TranslationBlock *tb,
                                 unsigned int n, target_ulong page_addr)
{
    PageDesc *p;
    TranslationBlock *last_first_tb;

    /* link the TB at the head of the page's TB list; the page slot
       index n is stored in the low bits of the list pointer */
    tb->page_addr[n] = page_addr;
    p = page_find_alloc(page_addr >> TARGET_PAGE_BITS);
    tb->page_next[n] = p->first_tb;
    last_first_tb = p->first_tb;
    p->first_tb = (TranslationBlock *)((long)tb | n);
    invalidate_page_bitmap(p);

#if defined(TARGET_HAS_SMC) || 1

#if defined(CONFIG_USER_ONLY)
    if (p->flags & PAGE_WRITE) {
        target_ulong addr;
        PageDesc *p2;
        int prot;

        /* force the host page as non writable (writes will have a
           page fault + mprotect overhead) */
        page_addr &= qemu_host_page_mask;
        prot = 0;
        /* a host page may contain several target pages: accumulate
           their protection flags and clear PAGE_WRITE on each */
        for(addr = page_addr; addr < page_addr + qemu_host_page_size;
            addr += TARGET_PAGE_SIZE) {

            p2 = page_find (addr >> TARGET_PAGE_BITS);
            if (!p2)
                continue;
            prot |= p2->flags;
            p2->flags &= ~PAGE_WRITE;
            /* NOTE(review): return value unused — presumably called
               for a side effect; confirm it is still needed */
            page_get_flags(addr);
          }
        mprotect(g2h(page_addr), qemu_host_page_size,
                 (prot & PAGE_BITS) & ~PAGE_WRITE);
#ifdef DEBUG_TB_INVALIDATE
        printf("protecting code page: 0x%08lx\n",
               page_addr);
#endif
    }
#else
    /* if some code is already present, then the pages are already
       protected. So we handle the case where only the first TB is
       allocated in a physical page */
    if (!last_first_tb) {
        tlb_protect_code(page_addr);
    }
#endif

#endif /* TARGET_HAS_SMC */
}
868fd6ce8f6Sbellard 
869fd6ce8f6Sbellard /* Allocate a new translation block. Flush the translation buffer if
870fd6ce8f6Sbellard    too many translation blocks or too much generated code. */
871c27004ecSbellard TranslationBlock *tb_alloc(target_ulong pc)
872fd6ce8f6Sbellard {
873fd6ce8f6Sbellard     TranslationBlock *tb;
874fd6ce8f6Sbellard 
875fd6ce8f6Sbellard     if (nb_tbs >= CODE_GEN_MAX_BLOCKS ||
876fd6ce8f6Sbellard         (code_gen_ptr - code_gen_buffer) >= CODE_GEN_BUFFER_MAX_SIZE)
877d4e8164fSbellard         return NULL;
878fd6ce8f6Sbellard     tb = &tbs[nb_tbs++];
879fd6ce8f6Sbellard     tb->pc = pc;
880b448f2f3Sbellard     tb->cflags = 0;
881d4e8164fSbellard     return tb;
882d4e8164fSbellard }
883d4e8164fSbellard 
8849fa3e853Sbellard /* add a new TB and link it to the physical page tables. phys_page2 is
8859fa3e853Sbellard    (-1) to indicate that only one page contains the TB. */
8869fa3e853Sbellard void tb_link_phys(TranslationBlock *tb,
8879fa3e853Sbellard                   target_ulong phys_pc, target_ulong phys_page2)
888d4e8164fSbellard {
8899fa3e853Sbellard     unsigned int h;
8909fa3e853Sbellard     TranslationBlock **ptb;
8919fa3e853Sbellard 
8929fa3e853Sbellard     /* add in the physical hash table */
8939fa3e853Sbellard     h = tb_phys_hash_func(phys_pc);
8949fa3e853Sbellard     ptb = &tb_phys_hash[h];
8959fa3e853Sbellard     tb->phys_hash_next = *ptb;
8969fa3e853Sbellard     *ptb = tb;
897fd6ce8f6Sbellard 
898fd6ce8f6Sbellard     /* add in the page list */
8999fa3e853Sbellard     tb_alloc_page(tb, 0, phys_pc & TARGET_PAGE_MASK);
9009fa3e853Sbellard     if (phys_page2 != -1)
9019fa3e853Sbellard         tb_alloc_page(tb, 1, phys_page2);
9029fa3e853Sbellard     else
9039fa3e853Sbellard         tb->page_addr[1] = -1;
9049fa3e853Sbellard 
905d4e8164fSbellard     tb->jmp_first = (TranslationBlock *)((long)tb | 2);
906d4e8164fSbellard     tb->jmp_next[0] = NULL;
907d4e8164fSbellard     tb->jmp_next[1] = NULL;
908b448f2f3Sbellard #ifdef USE_CODE_COPY
909b448f2f3Sbellard     tb->cflags &= ~CF_FP_USED;
910b448f2f3Sbellard     if (tb->cflags & CF_TB_FP_USED)
911b448f2f3Sbellard         tb->cflags |= CF_FP_USED;
912b448f2f3Sbellard #endif
913d4e8164fSbellard 
914d4e8164fSbellard     /* init original jump addresses */
915d4e8164fSbellard     if (tb->tb_next_offset[0] != 0xffff)
916d4e8164fSbellard         tb_reset_jump(tb, 0);
917d4e8164fSbellard     if (tb->tb_next_offset[1] != 0xffff)
918d4e8164fSbellard         tb_reset_jump(tb, 1);
9198a40a180Sbellard 
9208a40a180Sbellard #ifdef DEBUG_TB_CHECK
9218a40a180Sbellard     tb_page_check();
9228a40a180Sbellard #endif
923fd6ce8f6Sbellard }
924fd6ce8f6Sbellard 
925a513fe19Sbellard /* find the TB 'tb' such that tb[0].tc_ptr <= tc_ptr <
926a513fe19Sbellard    tb[1].tc_ptr. Return NULL if not found */
927a513fe19Sbellard TranslationBlock *tb_find_pc(unsigned long tc_ptr)
928a513fe19Sbellard {
929a513fe19Sbellard     int m_min, m_max, m;
930a513fe19Sbellard     unsigned long v;
931a513fe19Sbellard     TranslationBlock *tb;
932a513fe19Sbellard 
933a513fe19Sbellard     if (nb_tbs <= 0)
934a513fe19Sbellard         return NULL;
935a513fe19Sbellard     if (tc_ptr < (unsigned long)code_gen_buffer ||
936a513fe19Sbellard         tc_ptr >= (unsigned long)code_gen_ptr)
937a513fe19Sbellard         return NULL;
938a513fe19Sbellard     /* binary search (cf Knuth) */
939a513fe19Sbellard     m_min = 0;
940a513fe19Sbellard     m_max = nb_tbs - 1;
941a513fe19Sbellard     while (m_min <= m_max) {
942a513fe19Sbellard         m = (m_min + m_max) >> 1;
943a513fe19Sbellard         tb = &tbs[m];
944a513fe19Sbellard         v = (unsigned long)tb->tc_ptr;
945a513fe19Sbellard         if (v == tc_ptr)
946a513fe19Sbellard             return tb;
947a513fe19Sbellard         else if (tc_ptr < v) {
948a513fe19Sbellard             m_max = m - 1;
949a513fe19Sbellard         } else {
950a513fe19Sbellard             m_min = m + 1;
951a513fe19Sbellard         }
952a513fe19Sbellard     }
953a513fe19Sbellard     return &tbs[m_max];
954a513fe19Sbellard }
9557501267eSbellard 
956ea041c0eSbellard static void tb_reset_jump_recursive(TranslationBlock *tb);
957ea041c0eSbellard 
/* Unchain the outgoing jump 'n' of 'tb': remove tb from the circular
   list of TBs that jump directly to its target, patch the generated
   code back to the "no direct jump" state, then recurse into the
   target so nothing reachable from tb stays chained. */
static inline void tb_reset_jump_recursive2(TranslationBlock *tb, int n)
{
    TranslationBlock *tb1, *tb_next, **ptb;
    unsigned int n1;

    tb1 = tb->jmp_next[n];
    if (tb1 != NULL) {
        /* find head of list */
        for(;;) {
            /* the low 2 bits of each link encode the jump slot; the
               value 2 marks the list head (the jump target itself) */
            n1 = (long)tb1 & 3;
            tb1 = (TranslationBlock *)((long)tb1 & ~3);
            if (n1 == 2)
                break;
            tb1 = tb1->jmp_next[n1];
        }
        /* we are now sure now that tb jumps to tb1 */
        tb_next = tb1;

        /* remove tb from the jmp_first list */
        ptb = &tb_next->jmp_first;
        for(;;) {
            tb1 = *ptb;
            n1 = (long)tb1 & 3;
            tb1 = (TranslationBlock *)((long)tb1 & ~3);
            if (n1 == n && tb1 == tb)
                break;
            ptb = &tb1->jmp_next[n1];
        }
        *ptb = tb->jmp_next[n];
        tb->jmp_next[n] = NULL;

        /* suppress the jump to next tb in generated code */
        tb_reset_jump(tb, n);

        /* suppress jumps in the tb on which we could have jumped */
        tb_reset_jump_recursive(tb_next);
    }
}
996ea041c0eSbellard 
997ea041c0eSbellard static void tb_reset_jump_recursive(TranslationBlock *tb)
998ea041c0eSbellard {
999ea041c0eSbellard     tb_reset_jump_recursive2(tb, 0);
1000ea041c0eSbellard     tb_reset_jump_recursive2(tb, 1);
1001ea041c0eSbellard }
1002ea041c0eSbellard 
10031fddef4bSbellard #if defined(TARGET_HAS_ICE)
1004d720b93dSbellard static void breakpoint_invalidate(CPUState *env, target_ulong pc)
1005d720b93dSbellard {
1006c2f07f81Spbrook     target_ulong addr, pd;
1007c2f07f81Spbrook     ram_addr_t ram_addr;
1008c2f07f81Spbrook     PhysPageDesc *p;
1009d720b93dSbellard 
1010c2f07f81Spbrook     addr = cpu_get_phys_page_debug(env, pc);
1011c2f07f81Spbrook     p = phys_page_find(addr >> TARGET_PAGE_BITS);
1012c2f07f81Spbrook     if (!p) {
1013c2f07f81Spbrook         pd = IO_MEM_UNASSIGNED;
1014c2f07f81Spbrook     } else {
1015c2f07f81Spbrook         pd = p->phys_offset;
1016c2f07f81Spbrook     }
1017c2f07f81Spbrook     ram_addr = (pd & TARGET_PAGE_MASK) | (pc & ~TARGET_PAGE_MASK);
1018706cd4b5Spbrook     tb_invalidate_phys_page_range(ram_addr, ram_addr + 1, 0);
1019d720b93dSbellard }
1020c27004ecSbellard #endif
1021d720b93dSbellard 
1022c33a346eSbellard /* add a breakpoint. EXCP_DEBUG is returned by the CPU loop if a
1023c33a346eSbellard    breakpoint is reached */
10242e12669aSbellard int cpu_breakpoint_insert(CPUState *env, target_ulong pc)
10254c3a88a2Sbellard {
10261fddef4bSbellard #if defined(TARGET_HAS_ICE)
10274c3a88a2Sbellard     int i;
10284c3a88a2Sbellard 
10294c3a88a2Sbellard     for(i = 0; i < env->nb_breakpoints; i++) {
10304c3a88a2Sbellard         if (env->breakpoints[i] == pc)
10314c3a88a2Sbellard             return 0;
10324c3a88a2Sbellard     }
10334c3a88a2Sbellard 
10344c3a88a2Sbellard     if (env->nb_breakpoints >= MAX_BREAKPOINTS)
10354c3a88a2Sbellard         return -1;
10364c3a88a2Sbellard     env->breakpoints[env->nb_breakpoints++] = pc;
1037d720b93dSbellard 
1038d720b93dSbellard     breakpoint_invalidate(env, pc);
10394c3a88a2Sbellard     return 0;
10404c3a88a2Sbellard #else
10414c3a88a2Sbellard     return -1;
10424c3a88a2Sbellard #endif
10434c3a88a2Sbellard }
10444c3a88a2Sbellard 
10454c3a88a2Sbellard /* remove a breakpoint */
10462e12669aSbellard int cpu_breakpoint_remove(CPUState *env, target_ulong pc)
10474c3a88a2Sbellard {
10481fddef4bSbellard #if defined(TARGET_HAS_ICE)
10494c3a88a2Sbellard     int i;
10504c3a88a2Sbellard     for(i = 0; i < env->nb_breakpoints; i++) {
10514c3a88a2Sbellard         if (env->breakpoints[i] == pc)
10524c3a88a2Sbellard             goto found;
10534c3a88a2Sbellard     }
10544c3a88a2Sbellard     return -1;
10554c3a88a2Sbellard  found:
10564c3a88a2Sbellard     env->nb_breakpoints--;
10571fddef4bSbellard     if (i < env->nb_breakpoints)
10581fddef4bSbellard       env->breakpoints[i] = env->breakpoints[env->nb_breakpoints];
1059d720b93dSbellard 
1060d720b93dSbellard     breakpoint_invalidate(env, pc);
10614c3a88a2Sbellard     return 0;
10624c3a88a2Sbellard #else
10634c3a88a2Sbellard     return -1;
10644c3a88a2Sbellard #endif
10654c3a88a2Sbellard }
10664c3a88a2Sbellard 
1067c33a346eSbellard /* enable or disable single step mode. EXCP_DEBUG is returned by the
1068c33a346eSbellard    CPU loop after each instruction */
1069c33a346eSbellard void cpu_single_step(CPUState *env, int enabled)
1070c33a346eSbellard {
10711fddef4bSbellard #if defined(TARGET_HAS_ICE)
1072c33a346eSbellard     if (env->singlestep_enabled != enabled) {
1073c33a346eSbellard         env->singlestep_enabled = enabled;
1074c33a346eSbellard         /* must flush all the translated code to avoid inconsistancies */
10759fa3e853Sbellard         /* XXX: only flush what is necessary */
10760124311eSbellard         tb_flush(env);
1077c33a346eSbellard     }
1078c33a346eSbellard #endif
1079c33a346eSbellard }
1080c33a346eSbellard 
108134865134Sbellard /* enable or disable low levels log */
108234865134Sbellard void cpu_set_log(int log_flags)
108334865134Sbellard {
108434865134Sbellard     loglevel = log_flags;
108534865134Sbellard     if (loglevel && !logfile) {
108634865134Sbellard         logfile = fopen(logfilename, "w");
108734865134Sbellard         if (!logfile) {
108834865134Sbellard             perror(logfilename);
108934865134Sbellard             _exit(1);
109034865134Sbellard         }
10919fa3e853Sbellard #if !defined(CONFIG_SOFTMMU)
10929fa3e853Sbellard         /* must avoid mmap() usage of glibc by setting a buffer "by hand" */
10939fa3e853Sbellard         {
10949fa3e853Sbellard             static uint8_t logfile_buf[4096];
10959fa3e853Sbellard             setvbuf(logfile, logfile_buf, _IOLBF, sizeof(logfile_buf));
10969fa3e853Sbellard         }
10979fa3e853Sbellard #else
109834865134Sbellard         setvbuf(logfile, NULL, _IOLBF, 0);
10999fa3e853Sbellard #endif
110034865134Sbellard     }
110134865134Sbellard }
110234865134Sbellard 
110334865134Sbellard void cpu_set_log_filename(const char *filename)
110434865134Sbellard {
110534865134Sbellard     logfilename = strdup(filename);
110634865134Sbellard }
1107c33a346eSbellard 
/* mask must never be zero, except for A20 change call */
/* Raise the interrupt request bits in 'mask' and force the CPU out of
   chained translated code by unlinking the TB it is executing.
   NOTE(review): relies on testandset() being an atomic host primitive;
   'interrupt_lock' guards against concurrent unchaining. */
void cpu_interrupt(CPUState *env, int mask)
{
    TranslationBlock *tb;
    static int interrupt_lock;

    env->interrupt_request |= mask;
    /* if the cpu is currently executing code, we must unlink it and
       all the potentially executing TB */
    tb = env->current_tb;
    if (tb && !testandset(&interrupt_lock)) {
        env->current_tb = NULL;
        tb_reset_jump_recursive(tb);
        interrupt_lock = 0;
    }
}
1124ea041c0eSbellard 
/* Clear the interrupt request bits given in 'mask'. */
void cpu_reset_interrupt(CPUState *env, int mask)
{
    env->interrupt_request &= ~mask;
}
1129b54ad049Sbellard 
/* Table of supported log items: mask bit, user visible name and help
   text.  Terminated by an all-zero entry; some items are only
   compiled in for specific targets. */
CPULogItem cpu_log_items[] = {
    { CPU_LOG_TB_OUT_ASM, "out_asm",
      "show generated host assembly code for each compiled TB" },
    { CPU_LOG_TB_IN_ASM, "in_asm",
      "show target assembly code for each compiled TB" },
    { CPU_LOG_TB_OP, "op",
      "show micro ops for each compiled TB (only usable if 'in_asm' used)" },
#ifdef TARGET_I386
    { CPU_LOG_TB_OP_OPT, "op_opt",
      "show micro ops after optimization for each compiled TB" },
#endif
    { CPU_LOG_INT, "int",
      "show interrupts/exceptions in short format" },
    { CPU_LOG_EXEC, "exec",
      "show trace before each executed TB (lots of logs)" },
    { CPU_LOG_TB_CPU, "cpu",
      "show CPU state before bloc translation" },
#ifdef TARGET_I386
    { CPU_LOG_PCALL, "pcall",
      "show protected mode far calls/returns/exceptions" },
#endif
#ifdef DEBUG_IOPORT
    { CPU_LOG_IOPORT, "ioport",
      "show all i/o ports accesses" },
#endif
    /* terminator: mask 0 ends iteration in cpu_str_to_log_mask() */
    { 0, NULL, NULL },
};
1157f193c797Sbellard 
/* Return non-zero iff the first 'n' characters of 's1' exactly match
   the whole NUL-terminated string 's2'.  A negative 'n' never matches
   (also avoids the signed/unsigned comparison against strlen()). */
static int cmp1(const char *s1, int n, const char *s2)
{
    if (n < 0 || strlen(s2) != (size_t)n)
        return 0;
    return memcmp(s1, s2, (size_t)n) == 0;
}
1164f193c797Sbellard 
1165f193c797Sbellard /* takes a comma separated list of log masks. Return 0 if error. */
1166f193c797Sbellard int cpu_str_to_log_mask(const char *str)
1167f193c797Sbellard {
1168f193c797Sbellard     CPULogItem *item;
1169f193c797Sbellard     int mask;
1170f193c797Sbellard     const char *p, *p1;
1171f193c797Sbellard 
1172f193c797Sbellard     p = str;
1173f193c797Sbellard     mask = 0;
1174f193c797Sbellard     for(;;) {
1175f193c797Sbellard         p1 = strchr(p, ',');
1176f193c797Sbellard         if (!p1)
1177f193c797Sbellard             p1 = p + strlen(p);
11788e3a9fd2Sbellard 	if(cmp1(p,p1-p,"all")) {
11798e3a9fd2Sbellard 		for(item = cpu_log_items; item->mask != 0; item++) {
11808e3a9fd2Sbellard 			mask |= item->mask;
11818e3a9fd2Sbellard 		}
11828e3a9fd2Sbellard 	} else {
1183f193c797Sbellard         for(item = cpu_log_items; item->mask != 0; item++) {
1184f193c797Sbellard             if (cmp1(p, p1 - p, item->name))
1185f193c797Sbellard                 goto found;
1186f193c797Sbellard         }
1187f193c797Sbellard         return 0;
11888e3a9fd2Sbellard 	}
1189f193c797Sbellard     found:
1190f193c797Sbellard         mask |= item->mask;
1191f193c797Sbellard         if (*p1 != ',')
1192f193c797Sbellard             break;
1193f193c797Sbellard         p = p1 + 1;
1194f193c797Sbellard     }
1195f193c797Sbellard     return mask;
1196f193c797Sbellard }
1197ea041c0eSbellard 
11987501267eSbellard void cpu_abort(CPUState *env, const char *fmt, ...)
11997501267eSbellard {
12007501267eSbellard     va_list ap;
12017501267eSbellard 
12027501267eSbellard     va_start(ap, fmt);
12037501267eSbellard     fprintf(stderr, "qemu: fatal: ");
12047501267eSbellard     vfprintf(stderr, fmt, ap);
12057501267eSbellard     fprintf(stderr, "\n");
12067501267eSbellard #ifdef TARGET_I386
12077fe48483Sbellard     cpu_dump_state(env, stderr, fprintf, X86_DUMP_FPU | X86_DUMP_CCOP);
12087fe48483Sbellard #else
12097fe48483Sbellard     cpu_dump_state(env, stderr, fprintf, 0);
12107501267eSbellard #endif
12117501267eSbellard     va_end(ap);
12127501267eSbellard     abort();
12137501267eSbellard }
12147501267eSbellard 
12150124311eSbellard #if !defined(CONFIG_USER_ONLY)
12160124311eSbellard 
1217ee8b7021Sbellard /* NOTE: if flush_global is true, also flush global entries (not
1218ee8b7021Sbellard    implemented yet) */
1219ee8b7021Sbellard void tlb_flush(CPUState *env, int flush_global)
122033417e70Sbellard {
122133417e70Sbellard     int i;
12220124311eSbellard 
12239fa3e853Sbellard #if defined(DEBUG_TLB)
12249fa3e853Sbellard     printf("tlb_flush:\n");
12259fa3e853Sbellard #endif
12260124311eSbellard     /* must reset current TB so that interrupts cannot modify the
12270124311eSbellard        links while we are modifying them */
12280124311eSbellard     env->current_tb = NULL;
12290124311eSbellard 
123033417e70Sbellard     for(i = 0; i < CPU_TLB_SIZE; i++) {
123184b7b8e7Sbellard         env->tlb_table[0][i].addr_read = -1;
123284b7b8e7Sbellard         env->tlb_table[0][i].addr_write = -1;
123384b7b8e7Sbellard         env->tlb_table[0][i].addr_code = -1;
123484b7b8e7Sbellard         env->tlb_table[1][i].addr_read = -1;
123584b7b8e7Sbellard         env->tlb_table[1][i].addr_write = -1;
123684b7b8e7Sbellard         env->tlb_table[1][i].addr_code = -1;
123733417e70Sbellard     }
12389fa3e853Sbellard 
12398a40a180Sbellard     memset (env->tb_jmp_cache, 0, TB_JMP_CACHE_SIZE * sizeof (void *));
12409fa3e853Sbellard 
12419fa3e853Sbellard #if !defined(CONFIG_SOFTMMU)
12429fa3e853Sbellard     munmap((void *)MMAP_AREA_START, MMAP_AREA_END - MMAP_AREA_START);
12439fa3e853Sbellard #endif
12440a962c02Sbellard #ifdef USE_KQEMU
12450a962c02Sbellard     if (env->kqemu_enabled) {
12460a962c02Sbellard         kqemu_flush(env, flush_global);
12470a962c02Sbellard     }
12480a962c02Sbellard #endif
1249e3db7226Sbellard     tlb_flush_count++;
125033417e70Sbellard }
125133417e70Sbellard 
1252274da6b2Sbellard static inline void tlb_flush_entry(CPUTLBEntry *tlb_entry, target_ulong addr)
125361382a50Sbellard {
125484b7b8e7Sbellard     if (addr == (tlb_entry->addr_read &
125584b7b8e7Sbellard                  (TARGET_PAGE_MASK | TLB_INVALID_MASK)) ||
125684b7b8e7Sbellard         addr == (tlb_entry->addr_write &
125784b7b8e7Sbellard                  (TARGET_PAGE_MASK | TLB_INVALID_MASK)) ||
125884b7b8e7Sbellard         addr == (tlb_entry->addr_code &
125984b7b8e7Sbellard                  (TARGET_PAGE_MASK | TLB_INVALID_MASK))) {
126084b7b8e7Sbellard         tlb_entry->addr_read = -1;
126184b7b8e7Sbellard         tlb_entry->addr_write = -1;
126284b7b8e7Sbellard         tlb_entry->addr_code = -1;
126384b7b8e7Sbellard     }
126461382a50Sbellard }
126561382a50Sbellard 
/* Flush the TLB entries and cached TB lookups for the single virtual
   page containing 'addr'. */
void tlb_flush_page(CPUState *env, target_ulong addr)
{
    int i;
    TranslationBlock *tb;

#if defined(DEBUG_TLB)
    printf("tlb_flush_page: " TARGET_FMT_lx "\n", addr);
#endif
    /* must reset current TB so that interrupts cannot modify the
       links while we are modifying them */
    env->current_tb = NULL;

    /* flush the matching slot in both TLB tables */
    addr &= TARGET_PAGE_MASK;
    i = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
    tlb_flush_entry(&env->tlb_table[0][i], addr);
    tlb_flush_entry(&env->tlb_table[1][i], addr);

    /* drop cached TBs whose first or last byte lies in the page */
    for(i = 0; i < TB_JMP_CACHE_SIZE; i++) {
        tb = env->tb_jmp_cache[i];
        if (tb &&
            ((tb->pc & TARGET_PAGE_MASK) == addr ||
             ((tb->pc + tb->size - 1) & TARGET_PAGE_MASK) == addr)) {
            env->tb_jmp_cache[i] = NULL;
        }
    }

#if !defined(CONFIG_SOFTMMU)
    if (addr < MMAP_AREA_END)
        munmap((void *)addr, TARGET_PAGE_SIZE);
#endif
#ifdef USE_KQEMU
    if (env->kqemu_enabled) {
        kqemu_flush_page(env, addr);
    }
#endif
}
13029fa3e853Sbellard 
/* update the TLBs so that writes to code in the virtual page 'addr'
   can be detected */
static void tlb_protect_code(ram_addr_t ram_addr)
{
    /* clearing CODE_DIRTY_FLAG routes writes to this page through the
       slow path where self modifying code is detected */
    cpu_physical_memory_reset_dirty(ram_addr,
                                    ram_addr + TARGET_PAGE_SIZE,
                                    CODE_DIRTY_FLAG);
}
13119fa3e853Sbellard 
/* update the TLB so that writes in physical page 'phys_addr' are no longer
   tested for self modifying code */
/* 'env' and 'vaddr' are unused here; setting CODE_DIRTY_FLAG
   re-enables fast path writes for the page. */
static void tlb_unprotect_code_phys(CPUState *env, ram_addr_t ram_addr,
                                    target_ulong vaddr)
{
    phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] |= CODE_DIRTY_FLAG;
}
13199fa3e853Sbellard 
13201ccde1cbSbellard static inline void tlb_reset_dirty_range(CPUTLBEntry *tlb_entry,
13211ccde1cbSbellard                                          unsigned long start, unsigned long length)
13221ccde1cbSbellard {
13231ccde1cbSbellard     unsigned long addr;
132484b7b8e7Sbellard     if ((tlb_entry->addr_write & ~TARGET_PAGE_MASK) == IO_MEM_RAM) {
132584b7b8e7Sbellard         addr = (tlb_entry->addr_write & TARGET_PAGE_MASK) + tlb_entry->addend;
13261ccde1cbSbellard         if ((addr - start) < length) {
132784b7b8e7Sbellard             tlb_entry->addr_write = (tlb_entry->addr_write & TARGET_PAGE_MASK) | IO_MEM_NOTDIRTY;
13281ccde1cbSbellard         }
13291ccde1cbSbellard     }
13301ccde1cbSbellard }
13311ccde1cbSbellard 
/* Clear the given dirty flag bits for the RAM range [start, end) and
   patch every CPU's TLB so that writes into the range go through the
   not-dirty slow path, which sets the bits again. */
void cpu_physical_memory_reset_dirty(ram_addr_t start, ram_addr_t end,
                                     int dirty_flags)
{
    CPUState *env;
    unsigned long length, start1;
    int i, mask, len;
    uint8_t *p;

    start &= TARGET_PAGE_MASK;
    end = TARGET_PAGE_ALIGN(end);

    length = end - start;
    if (length == 0)
        return;
    len = length >> TARGET_PAGE_BITS;
#ifdef USE_KQEMU
    /* XXX: should not depend on cpu context */
    env = first_cpu;
    if (env->kqemu_enabled) {
        /* tell the accelerator about each page going clean */
        ram_addr_t addr;
        addr = start;
        for(i = 0; i < len; i++) {
            kqemu_set_notdirty(env, addr);
            addr += TARGET_PAGE_SIZE;
        }
    }
#endif
    /* clear the requested flag bits in the per-page dirty byte array */
    mask = ~dirty_flags;
    p = phys_ram_dirty + (start >> TARGET_PAGE_BITS);
    for(i = 0; i < len; i++)
        p[i] &= mask;

    /* we modify the TLB cache so that the dirty bit will be set again
       when accessing the range */
    start1 = start + (unsigned long)phys_ram_base;  /* host address of start */
    for(env = first_cpu; env != NULL; env = env->next_cpu) {
        for(i = 0; i < CPU_TLB_SIZE; i++)
            tlb_reset_dirty_range(&env->tlb_table[0][i], start1, length);
        for(i = 0; i < CPU_TLB_SIZE; i++)
            tlb_reset_dirty_range(&env->tlb_table[1][i], start1, length);
    }

#if !defined(CONFIG_SOFTMMU)
    /* XXX: this is expensive */
    {
        VirtPageDesc *p;
        int j;
        target_ulong addr;

        /* walk the whole virtual page table and write-protect every
           currently valid writable mapping whose physical page falls
           inside [start, end) */
        for(i = 0; i < L1_SIZE; i++) {
            p = l1_virt_map[i];
            if (p) {
                addr = i << (TARGET_PAGE_BITS + L2_BITS);
                for(j = 0; j < L2_SIZE; j++) {
                    if (p->valid_tag == virt_valid_tag &&
                        p->phys_addr >= start && p->phys_addr < end &&
                        (p->prot & PROT_WRITE)) {
                        if (addr < MMAP_AREA_END) {
                            mprotect((void *)addr, TARGET_PAGE_SIZE,
                                     p->prot & ~PROT_WRITE);
                        }
                    }
                    addr += TARGET_PAGE_SIZE;
                    p++;
                }
            }
        }
    }
#endif
}
14021ccde1cbSbellard 
14033a7d929eSbellard static inline void tlb_update_dirty(CPUTLBEntry *tlb_entry)
14043a7d929eSbellard {
14053a7d929eSbellard     ram_addr_t ram_addr;
14063a7d929eSbellard 
140784b7b8e7Sbellard     if ((tlb_entry->addr_write & ~TARGET_PAGE_MASK) == IO_MEM_RAM) {
140884b7b8e7Sbellard         ram_addr = (tlb_entry->addr_write & TARGET_PAGE_MASK) +
14093a7d929eSbellard             tlb_entry->addend - (unsigned long)phys_ram_base;
14103a7d929eSbellard         if (!cpu_physical_memory_is_dirty(ram_addr)) {
141184b7b8e7Sbellard             tlb_entry->addr_write |= IO_MEM_NOTDIRTY;
14123a7d929eSbellard         }
14133a7d929eSbellard     }
14143a7d929eSbellard }
14153a7d929eSbellard 
14163a7d929eSbellard /* update the TLB according to the current state of the dirty bits */
14173a7d929eSbellard void cpu_tlb_update_dirty(CPUState *env)
14183a7d929eSbellard {
14193a7d929eSbellard     int i;
14203a7d929eSbellard     for(i = 0; i < CPU_TLB_SIZE; i++)
142184b7b8e7Sbellard         tlb_update_dirty(&env->tlb_table[0][i]);
14223a7d929eSbellard     for(i = 0; i < CPU_TLB_SIZE; i++)
142384b7b8e7Sbellard         tlb_update_dirty(&env->tlb_table[1][i]);
14243a7d929eSbellard }
14253a7d929eSbellard 
14261ccde1cbSbellard static inline void tlb_set_dirty1(CPUTLBEntry *tlb_entry,
14271ccde1cbSbellard                                   unsigned long start)
14281ccde1cbSbellard {
14291ccde1cbSbellard     unsigned long addr;
143084b7b8e7Sbellard     if ((tlb_entry->addr_write & ~TARGET_PAGE_MASK) == IO_MEM_NOTDIRTY) {
143184b7b8e7Sbellard         addr = (tlb_entry->addr_write & TARGET_PAGE_MASK) + tlb_entry->addend;
14321ccde1cbSbellard         if (addr == start) {
143384b7b8e7Sbellard             tlb_entry->addr_write = (tlb_entry->addr_write & TARGET_PAGE_MASK) | IO_MEM_RAM;
14341ccde1cbSbellard         }
14351ccde1cbSbellard     }
14361ccde1cbSbellard }
14371ccde1cbSbellard 
14381ccde1cbSbellard /* update the TLB corresponding to virtual page vaddr and phys addr
14391ccde1cbSbellard    addr so that it is no longer dirty */
14406a00d601Sbellard static inline void tlb_set_dirty(CPUState *env,
14416a00d601Sbellard                                  unsigned long addr, target_ulong vaddr)
14421ccde1cbSbellard {
14431ccde1cbSbellard     int i;
14441ccde1cbSbellard 
14451ccde1cbSbellard     addr &= TARGET_PAGE_MASK;
14461ccde1cbSbellard     i = (vaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
144784b7b8e7Sbellard     tlb_set_dirty1(&env->tlb_table[0][i], addr);
144884b7b8e7Sbellard     tlb_set_dirty1(&env->tlb_table[1][i], addr);
14491ccde1cbSbellard }
14501ccde1cbSbellard 
145159817ccbSbellard /* add a new TLB entry. At most one entry for a given virtual address
145259817ccbSbellard    is permitted. Return 0 if OK or 2 if the page could not be mapped
145359817ccbSbellard    (can only happen in non SOFTMMU mode for I/O pages or pages
145459817ccbSbellard    conflicting with the host address space). */
int tlb_set_page_exec(CPUState *env, target_ulong vaddr,
                      target_phys_addr_t paddr, int prot,
                      int is_user, int is_softmmu)
{
    PhysPageDesc *p;
    unsigned long pd;
    unsigned int index;
    target_ulong address;
    target_phys_addr_t addend;
    int ret;
    CPUTLBEntry *te;

    /* look up the physical page descriptor; unmapped addresses fall
       back to the unassigned-memory handler */
    p = phys_page_find(paddr >> TARGET_PAGE_BITS);
    if (!p) {
        pd = IO_MEM_UNASSIGNED;
    } else {
        pd = p->phys_offset;
    }
#if defined(DEBUG_TLB)
    printf("tlb_set_page: vaddr=" TARGET_FMT_lx " paddr=0x%08x prot=%x u=%d smmu=%d pd=0x%08lx\n",
           vaddr, (int)paddr, prot, is_user, is_softmmu, pd);
#endif

    ret = 0;
#if !defined(CONFIG_SOFTMMU)
    if (is_softmmu)
#endif
    {
        if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM) {
            /* IO memory case */
            address = vaddr | pd;
            addend = paddr;
        } else {
            /* standard memory */
            address = vaddr;
            addend = (unsigned long)phys_ram_base + (pd & TARGET_PAGE_MASK);
        }

        /* fill the TLB slot selected by the virtual address; the
           stored addend turns a guest virtual address into a host
           pointer (it is applied as addr + addend) */
        index = (vaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
        addend -= vaddr;
        te = &env->tlb_table[is_user][index];
        te->addend = addend;
        if (prot & PAGE_READ) {
            te->addr_read = address;
        } else {
            te->addr_read = -1;  /* -1 never matches: forces a refill */
        }
        if (prot & PAGE_EXEC) {
            te->addr_code = address;
        } else {
            te->addr_code = -1;
        }
        if (prot & PAGE_WRITE) {
            if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_ROM) {
                /* ROM: access is ignored (same as unassigned) */
                te->addr_write = vaddr | IO_MEM_ROM;
            } else if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_RAM &&
                       !cpu_physical_memory_is_dirty(pd)) {
                /* clean RAM page: route writes through the not-dirty
                   handler so dirty bits get updated */
                te->addr_write = vaddr | IO_MEM_NOTDIRTY;
            } else {
                te->addr_write = address;
            }
        } else {
            te->addr_write = -1;
        }
    }
#if !defined(CONFIG_SOFTMMU)
    else {
        /* no-softmmu build: map the guest page directly into the host
           address space instead of filling a TLB entry */
        if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM) {
            /* IO access: no mapping is done as it will be handled by the
               soft MMU */
            if (!(env->hflags & HF_SOFTMMU_MASK))
                ret = 2;
        } else {
            void *map_addr;

            if (vaddr >= MMAP_AREA_END) {
                ret = 2;
            } else {
                if (prot & PROT_WRITE) {
                    if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_ROM ||
#if defined(TARGET_HAS_SMC) || 1
                        first_tb ||
#endif
                        ((pd & ~TARGET_PAGE_MASK) == IO_MEM_RAM &&
                         !cpu_physical_memory_is_dirty(pd))) {
                        /* ROM: we do as if code was inside */
                        /* if code is present, we only map as read only and save the
                           original mapping */
                        VirtPageDesc *vp;

                        vp = virt_page_find_alloc(vaddr >> TARGET_PAGE_BITS, 1);
                        vp->phys_addr = pd;
                        vp->prot = prot;
                        vp->valid_tag = virt_valid_tag;
                        prot &= ~PAGE_WRITE;
                    }
                }
                map_addr = mmap((void *)vaddr, TARGET_PAGE_SIZE, prot,
                                MAP_SHARED | MAP_FIXED, phys_ram_fd, (pd & TARGET_PAGE_MASK));
                if (map_addr == MAP_FAILED) {
                    cpu_abort(env, "mmap failed when mapped physical address 0x%08x to virtual address 0x%08x\n",
                              paddr, vaddr);
                }
            }
        }
    }
#endif
    return ret;
}
15659fa3e853Sbellard 
15669fa3e853Sbellard /* called from signal handler: invalidate the code and unprotect the
15679fa3e853Sbellard    page. Return TRUE if the fault was succesfully handled. */
int page_unprotect(target_ulong addr, unsigned long pc, void *puc)
{
#if !defined(CONFIG_SOFTMMU)
    VirtPageDesc *vp;

#if defined(DEBUG_TLB)
    printf("page_unprotect: addr=0x%08x\n", addr);
#endif
    addr &= TARGET_PAGE_MASK;

    /* if it is not mapped, no need to worry here */
    if (addr >= MMAP_AREA_END)
        return 0;
    vp = virt_page_find(addr >> TARGET_PAGE_BITS);
    if (!vp)
        return 0;
    /* NOTE: in this case, validate_tag is _not_ tested as it
       validates only the code TLB */
    if (vp->valid_tag != virt_valid_tag)
        return 0;
    /* a page that was never writable cannot be unprotected: the fault
       is genuine */
    if (!(vp->prot & PAGE_WRITE))
        return 0;
#if defined(DEBUG_TLB)
    printf("page_unprotect: addr=0x%08x phys_addr=0x%08x prot=%x\n",
           addr, vp->phys_addr, vp->prot);
#endif
    /* restore the saved original protection, which includes write
       access (checked above) */
    if (mprotect((void *)addr, TARGET_PAGE_SIZE, vp->prot) < 0)
        cpu_abort(cpu_single_env, "error mprotect addr=0x%lx prot=%d\n",
                  (unsigned long)addr, vp->prot);
    /* set the dirty bit */
    phys_ram_dirty[vp->phys_addr >> TARGET_PAGE_BITS] = 0xff;
    /* flush the code inside */
    tb_invalidate_phys_page(vp->phys_addr, pc, puc);
    return 1;
#else
    /* softmmu build: write protection faults never reach this path */
    return 0;
#endif
}
160633417e70Sbellard 
16070124311eSbellard #else
16080124311eSbellard 
void tlb_flush(CPUState *env, int flush_global)
{
    /* user-mode emulation (CONFIG_USER_ONLY): there is no softmmu TLB,
       so a flush is a no-op */
}
16120124311eSbellard 
void tlb_flush_page(CPUState *env, target_ulong addr)
{
    /* user-mode emulation: no softmmu TLB to flush */
}
16160124311eSbellard 
/* user-mode emulation: there is no softmmu TLB to fill, so nothing is
   installed; always report success (0) */
int tlb_set_page_exec(CPUState *env, target_ulong vaddr,
                      target_phys_addr_t paddr, int prot,
                      int is_user, int is_softmmu)
{
    return 0;
}
162333417e70Sbellard 
16249fa3e853Sbellard /* dump memory mappings */
void page_dump(FILE *f)
{
    unsigned long start, end;
    int i, j, prot, prot1;
    PageDesc *p;

    fprintf(f, "%-8s %-8s %-8s %s\n",
            "start", "end", "size", "prot");
    /* walk every page descriptor and coalesce runs of pages with
       identical protection flags into single output lines */
    start = -1;  /* -1 means "not currently inside a mapped run" */
    end = -1;
    prot = 0;
    /* the extra iteration (i == L1_SIZE) forces p = NULL / prot1 = 0,
       which flushes the final run before the loop ends */
    for(i = 0; i <= L1_SIZE; i++) {
        if (i < L1_SIZE)
            p = l1_map[i];
        else
            p = NULL;
        for(j = 0;j < L2_SIZE; j++) {
            if (!p)
                prot1 = 0;
            else
                prot1 = p[j].flags;
            if (prot1 != prot) {
                /* protection changed: print the run that just ended */
                end = (i << (32 - L1_BITS)) | (j << TARGET_PAGE_BITS);
                if (start != -1) {
                    fprintf(f, "%08lx-%08lx %08lx %c%c%c\n",
                            start, end, end - start,
                            prot & PAGE_READ ? 'r' : '-',
                            prot & PAGE_WRITE ? 'w' : '-',
                            prot & PAGE_EXEC ? 'x' : '-');
                }
                if (prot1 != 0)
                    start = end;
                else
                    start = -1;
                prot = prot1;
            }
            /* an absent L2 table means the rest of this L1 slot is
               unmapped: skip it */
            if (!p)
                break;
        }
    }
}
16669fa3e853Sbellard 
166753a5960aSpbrook int page_get_flags(target_ulong address)
16689fa3e853Sbellard {
16699fa3e853Sbellard     PageDesc *p;
16709fa3e853Sbellard 
16719fa3e853Sbellard     p = page_find(address >> TARGET_PAGE_BITS);
16729fa3e853Sbellard     if (!p)
16739fa3e853Sbellard         return 0;
16749fa3e853Sbellard     return p->flags;
16759fa3e853Sbellard }
16769fa3e853Sbellard 
16779fa3e853Sbellard /* modify the flags of a page and invalidate the code if
16789fa3e853Sbellard    necessary. The flag PAGE_WRITE_ORG is positionned automatically
16799fa3e853Sbellard    depending on PAGE_WRITE */
168053a5960aSpbrook void page_set_flags(target_ulong start, target_ulong end, int flags)
16819fa3e853Sbellard {
16829fa3e853Sbellard     PageDesc *p;
168353a5960aSpbrook     target_ulong addr;
16849fa3e853Sbellard 
16859fa3e853Sbellard     start = start & TARGET_PAGE_MASK;
16869fa3e853Sbellard     end = TARGET_PAGE_ALIGN(end);
16879fa3e853Sbellard     if (flags & PAGE_WRITE)
16889fa3e853Sbellard         flags |= PAGE_WRITE_ORG;
16899fa3e853Sbellard     spin_lock(&tb_lock);
16909fa3e853Sbellard     for(addr = start; addr < end; addr += TARGET_PAGE_SIZE) {
16919fa3e853Sbellard         p = page_find_alloc(addr >> TARGET_PAGE_BITS);
16929fa3e853Sbellard         /* if the write protection is set, then we invalidate the code
16939fa3e853Sbellard            inside */
16949fa3e853Sbellard         if (!(p->flags & PAGE_WRITE) &&
16959fa3e853Sbellard             (flags & PAGE_WRITE) &&
16969fa3e853Sbellard             p->first_tb) {
1697d720b93dSbellard             tb_invalidate_phys_page(addr, 0, NULL);
16989fa3e853Sbellard         }
16999fa3e853Sbellard         p->flags = flags;
17009fa3e853Sbellard     }
17019fa3e853Sbellard     spin_unlock(&tb_lock);
17029fa3e853Sbellard }
17039fa3e853Sbellard 
17049fa3e853Sbellard /* called from signal handler: invalidate the code and unprotect the
17059fa3e853Sbellard    page. Return TRUE if the fault was succesfully handled. */
int page_unprotect(target_ulong address, unsigned long pc, void *puc)
{
    unsigned int page_index, prot, pindex;
    PageDesc *p, *p1;
    target_ulong host_start, host_end, addr;

    /* mprotect works at host page granularity, which may cover several
       target pages: gather the combined flags of every target page in
       the host page */
    host_start = address & qemu_host_page_mask;
    page_index = host_start >> TARGET_PAGE_BITS;
    p1 = page_find(page_index);
    if (!p1)
        return 0;
    host_end = host_start + qemu_host_page_size;
    p = p1;
    prot = 0;
    for(addr = host_start;addr < host_end; addr += TARGET_PAGE_SIZE) {
        prot |= p->flags;
        p++;
    }
    /* if the page was really writable, then we change its
       protection back to writable */
    if (prot & PAGE_WRITE_ORG) {
        pindex = (address - host_start) >> TARGET_PAGE_BITS;
        if (!(p1[pindex].flags & PAGE_WRITE)) {
            mprotect((void *)g2h(host_start), qemu_host_page_size,
                     (prot & PAGE_BITS) | PAGE_WRITE);
            p1[pindex].flags |= PAGE_WRITE;
            /* and since the content will be modified, we must invalidate
               the corresponding translated code. */
            tb_invalidate_phys_page(address, pc, puc);
#ifdef DEBUG_TB_CHECK
            tb_invalidate_check(address);
#endif
            return 1;
        }
    }
    /* the fault was not caused by our write protection */
    return 0;
}
17439fa3e853Sbellard 
17449fa3e853Sbellard /* call this function when system calls directly modify a memory area */
174553a5960aSpbrook /* ??? This should be redundant now we have lock_user.  */
174653a5960aSpbrook void page_unprotect_range(target_ulong data, target_ulong data_size)
17479fa3e853Sbellard {
174853a5960aSpbrook     target_ulong start, end, addr;
17499fa3e853Sbellard 
175053a5960aSpbrook     start = data;
17519fa3e853Sbellard     end = start + data_size;
17529fa3e853Sbellard     start &= TARGET_PAGE_MASK;
17539fa3e853Sbellard     end = TARGET_PAGE_ALIGN(end);
17549fa3e853Sbellard     for(addr = start; addr < end; addr += TARGET_PAGE_SIZE) {
1755d720b93dSbellard         page_unprotect(addr, 0, NULL);
17569fa3e853Sbellard     }
17579fa3e853Sbellard }
17589fa3e853Sbellard 
static inline void tlb_set_dirty(CPUState *env,
                                 unsigned long addr, target_ulong vaddr)
{
    /* user-mode emulation: no softmmu TLB, nothing to update */
}
17639fa3e853Sbellard #endif /* defined(CONFIG_USER_ONLY) */
176433417e70Sbellard 
176533417e70Sbellard /* register physical memory. 'size' must be a multiple of the target
176633417e70Sbellard    page size. If (phys_offset & ~TARGET_PAGE_MASK) != 0, then it is an
176733417e70Sbellard    io memory page */
17682e12669aSbellard void cpu_register_physical_memory(target_phys_addr_t start_addr,
17692e12669aSbellard                                   unsigned long size,
17702e12669aSbellard                                   unsigned long phys_offset)
177133417e70Sbellard {
1772108c49b8Sbellard     target_phys_addr_t addr, end_addr;
177392e873b9Sbellard     PhysPageDesc *p;
177433417e70Sbellard 
17755fd386f6Sbellard     size = (size + TARGET_PAGE_SIZE - 1) & TARGET_PAGE_MASK;
177633417e70Sbellard     end_addr = start_addr + size;
17775fd386f6Sbellard     for(addr = start_addr; addr != end_addr; addr += TARGET_PAGE_SIZE) {
1778108c49b8Sbellard         p = phys_page_find_alloc(addr >> TARGET_PAGE_BITS, 1);
17799fa3e853Sbellard         p->phys_offset = phys_offset;
17809fa3e853Sbellard         if ((phys_offset & ~TARGET_PAGE_MASK) <= IO_MEM_ROM)
178133417e70Sbellard             phys_offset += TARGET_PAGE_SIZE;
178233417e70Sbellard     }
178333417e70Sbellard }
178433417e70Sbellard 
/* reads from unassigned memory return 0 */
static uint32_t unassigned_mem_readb(void *opaque, target_phys_addr_t addr)
{
    return 0;
}
178933417e70Sbellard 
/* writes to unassigned memory are silently ignored */
static void unassigned_mem_writeb(void *opaque, target_phys_addr_t addr, uint32_t val)
{
}
179333417e70Sbellard 
/* dispatch table indexed by access size (byte/word/dword); the byte
   handler is reused for all three sizes since the result is always 0 */
static CPUReadMemoryFunc *unassigned_mem_read[3] = {
    unassigned_mem_readb,
    unassigned_mem_readb,
    unassigned_mem_readb,
};
179933417e70Sbellard 
/* dispatch table indexed by access size; all sizes share the no-op
   byte handler */
static CPUWriteMemoryFunc *unassigned_mem_write[3] = {
    unassigned_mem_writeb,
    unassigned_mem_writeb,
    unassigned_mem_writeb,
};
180533417e70Sbellard 
/* byte write handler for clean RAM pages (IO_MEM_NOTDIRTY): invalidate
   any translated code on the page, perform the store, then update the
   dirty bits */
static void notdirty_mem_writeb(void *opaque, target_phys_addr_t addr, uint32_t val)
{
    unsigned long ram_addr;
    int dirty_flags;
    ram_addr = addr - (unsigned long)phys_ram_base;
    dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
    if (!(dirty_flags & CODE_DIRTY_FLAG)) {
        /* the page may contain translated code: flush it before the
           store goes through, then re-read the flags it may have set */
#if !defined(CONFIG_USER_ONLY)
        tb_invalidate_phys_page_fast(ram_addr, 1);
        dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
#endif
    }
    stb_p((uint8_t *)(long)addr, val);
#ifdef USE_KQEMU
    if (cpu_single_env->kqemu_enabled &&
        (dirty_flags & KQEMU_MODIFY_PAGE_MASK) != KQEMU_MODIFY_PAGE_MASK)
        kqemu_modify_page(cpu_single_env, ram_addr);
#endif
    /* mark the page dirty for every consumer except code tracking */
    dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
    phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] = dirty_flags;
    /* we remove the notdirty callback only if the code has been
       flushed */
    if (dirty_flags == 0xff)
        tlb_set_dirty(cpu_single_env, addr, cpu_single_env->mem_write_vaddr);
}
18311ccde1cbSbellard 
/* 16-bit variant of notdirty_mem_writeb: same invalidate/store/mark
   sequence with a 2-byte access */
static void notdirty_mem_writew(void *opaque, target_phys_addr_t addr, uint32_t val)
{
    unsigned long ram_addr;
    int dirty_flags;
    ram_addr = addr - (unsigned long)phys_ram_base;
    dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
    if (!(dirty_flags & CODE_DIRTY_FLAG)) {
        /* flush translated code before the store, then re-read flags */
#if !defined(CONFIG_USER_ONLY)
        tb_invalidate_phys_page_fast(ram_addr, 2);
        dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
#endif
    }
    stw_p((uint8_t *)(long)addr, val);
#ifdef USE_KQEMU
    if (cpu_single_env->kqemu_enabled &&
        (dirty_flags & KQEMU_MODIFY_PAGE_MASK) != KQEMU_MODIFY_PAGE_MASK)
        kqemu_modify_page(cpu_single_env, ram_addr);
#endif
    /* mark the page dirty for every consumer except code tracking */
    dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
    phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] = dirty_flags;
    /* we remove the notdirty callback only if the code has been
       flushed */
    if (dirty_flags == 0xff)
        tlb_set_dirty(cpu_single_env, addr, cpu_single_env->mem_write_vaddr);
}
18571ccde1cbSbellard 
/* 32-bit variant of notdirty_mem_writeb: same invalidate/store/mark
   sequence with a 4-byte access */
static void notdirty_mem_writel(void *opaque, target_phys_addr_t addr, uint32_t val)
{
    unsigned long ram_addr;
    int dirty_flags;
    ram_addr = addr - (unsigned long)phys_ram_base;
    dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
    if (!(dirty_flags & CODE_DIRTY_FLAG)) {
        /* flush translated code before the store, then re-read flags */
#if !defined(CONFIG_USER_ONLY)
        tb_invalidate_phys_page_fast(ram_addr, 4);
        dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
#endif
    }
    stl_p((uint8_t *)(long)addr, val);
#ifdef USE_KQEMU
    if (cpu_single_env->kqemu_enabled &&
        (dirty_flags & KQEMU_MODIFY_PAGE_MASK) != KQEMU_MODIFY_PAGE_MASK)
        kqemu_modify_page(cpu_single_env, ram_addr);
#endif
    /* mark the page dirty for every consumer except code tracking */
    dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
    phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] = dirty_flags;
    /* we remove the notdirty callback only if the code has been
       flushed */
    if (dirty_flags == 0xff)
        tlb_set_dirty(cpu_single_env, addr, cpu_single_env->mem_write_vaddr);
}
18831ccde1cbSbellard 
/* read table for io memory slots that are only special on the write
   path (registered for IO_MEM_ROM and IO_MEM_NOTDIRTY in io_mem_init);
   the entries are never called */
static CPUReadMemoryFunc *error_mem_read[3] = {
    NULL, /* never used */
    NULL, /* never used */
    NULL, /* never used */
};
18893a7d929eSbellard 
/* write dispatch table for the IO_MEM_NOTDIRTY slot, indexed by access
   size (byte/word/dword) */
static CPUWriteMemoryFunc *notdirty_mem_write[3] = {
    notdirty_mem_writeb,
    notdirty_mem_writew,
    notdirty_mem_writel,
};
18951ccde1cbSbellard 
static void io_mem_init(void)
{
    /* register the fixed io memory slots; each slot index must match
       the corresponding IO_MEM_* constant */
    cpu_register_io_memory(IO_MEM_ROM >> IO_MEM_SHIFT, error_mem_read, unassigned_mem_write, NULL);
    cpu_register_io_memory(IO_MEM_UNASSIGNED >> IO_MEM_SHIFT, unassigned_mem_read, unassigned_mem_write, NULL);
    cpu_register_io_memory(IO_MEM_NOTDIRTY >> IO_MEM_SHIFT, error_mem_read, notdirty_mem_write, NULL);
    /* NOTE(review): dynamic allocation starts at slot 5 although only
       slots up to 3 are registered here — presumably slot 4 is reserved
       elsewhere; confirm before changing */
    io_mem_nb = 5;

    /* alloc dirty bits array */
    phys_ram_dirty = qemu_vmalloc(phys_ram_size >> TARGET_PAGE_BITS);
    /* one dirty byte per target page, initially all-dirty */
    memset(phys_ram_dirty, 0xff, phys_ram_size >> TARGET_PAGE_BITS);
}
190733417e70Sbellard 
190833417e70Sbellard /* mem_read and mem_write are arrays of functions containing the
190933417e70Sbellard    function to access byte (index 0), word (index 1) and dword (index
191033417e70Sbellard    2). All functions must be supplied. If io_index is non zero, the
191133417e70Sbellard    corresponding io zone is modified. If it is zero, a new io zone is
191233417e70Sbellard    allocated. The return value can be used with
191333417e70Sbellard    cpu_register_physical_memory(). (-1) is returned if error. */
191433417e70Sbellard int cpu_register_io_memory(int io_index,
191533417e70Sbellard                            CPUReadMemoryFunc **mem_read,
1916a4193c8aSbellard                            CPUWriteMemoryFunc **mem_write,
1917a4193c8aSbellard                            void *opaque)
191833417e70Sbellard {
191933417e70Sbellard     int i;
192033417e70Sbellard 
192133417e70Sbellard     if (io_index <= 0) {
1922b5ff1b31Sbellard         if (io_mem_nb >= IO_MEM_NB_ENTRIES)
192333417e70Sbellard             return -1;
192433417e70Sbellard         io_index = io_mem_nb++;
192533417e70Sbellard     } else {
192633417e70Sbellard         if (io_index >= IO_MEM_NB_ENTRIES)
192733417e70Sbellard             return -1;
192833417e70Sbellard     }
192933417e70Sbellard 
193033417e70Sbellard     for(i = 0;i < 3; i++) {
193133417e70Sbellard         io_mem_read[io_index][i] = mem_read[i];
193233417e70Sbellard         io_mem_write[io_index][i] = mem_write[i];
193333417e70Sbellard     }
1934a4193c8aSbellard     io_mem_opaque[io_index] = opaque;
193533417e70Sbellard     return io_index << IO_MEM_SHIFT;
193633417e70Sbellard }
193761382a50Sbellard 
19388926b517Sbellard CPUWriteMemoryFunc **cpu_get_io_memory_write(int io_index)
19398926b517Sbellard {
19408926b517Sbellard     return io_mem_write[io_index >> IO_MEM_SHIFT];
19418926b517Sbellard }
19428926b517Sbellard 
19438926b517Sbellard CPUReadMemoryFunc **cpu_get_io_memory_read(int io_index)
19448926b517Sbellard {
19458926b517Sbellard     return io_mem_read[io_index >> IO_MEM_SHIFT];
19468926b517Sbellard }
19478926b517Sbellard 
194813eb76e0Sbellard /* physical memory access (slow version, mainly for debug) */
194913eb76e0Sbellard #if defined(CONFIG_USER_ONLY)
19502e12669aSbellard void cpu_physical_memory_rw(target_phys_addr_t addr, uint8_t *buf,
195113eb76e0Sbellard                             int len, int is_write)
195213eb76e0Sbellard {
195313eb76e0Sbellard     int l, flags;
195413eb76e0Sbellard     target_ulong page;
195553a5960aSpbrook     void * p;
195613eb76e0Sbellard 
195713eb76e0Sbellard     while (len > 0) {
195813eb76e0Sbellard         page = addr & TARGET_PAGE_MASK;
195913eb76e0Sbellard         l = (page + TARGET_PAGE_SIZE) - addr;
196013eb76e0Sbellard         if (l > len)
196113eb76e0Sbellard             l = len;
196213eb76e0Sbellard         flags = page_get_flags(page);
196313eb76e0Sbellard         if (!(flags & PAGE_VALID))
196413eb76e0Sbellard             return;
196513eb76e0Sbellard         if (is_write) {
196613eb76e0Sbellard             if (!(flags & PAGE_WRITE))
196713eb76e0Sbellard                 return;
196853a5960aSpbrook             p = lock_user(addr, len, 0);
196953a5960aSpbrook             memcpy(p, buf, len);
197053a5960aSpbrook             unlock_user(p, addr, len);
197113eb76e0Sbellard         } else {
197213eb76e0Sbellard             if (!(flags & PAGE_READ))
197313eb76e0Sbellard                 return;
197453a5960aSpbrook             p = lock_user(addr, len, 1);
197553a5960aSpbrook             memcpy(buf, p, len);
197653a5960aSpbrook             unlock_user(p, addr, 0);
197713eb76e0Sbellard         }
197813eb76e0Sbellard         len -= l;
197913eb76e0Sbellard         buf += l;
198013eb76e0Sbellard         addr += l;
198113eb76e0Sbellard     }
198213eb76e0Sbellard }
19838df1cd07Sbellard 
198413eb76e0Sbellard #else
19852e12669aSbellard void cpu_physical_memory_rw(target_phys_addr_t addr, uint8_t *buf,
198613eb76e0Sbellard                             int len, int is_write)
198713eb76e0Sbellard {
198813eb76e0Sbellard     int l, io_index;
198913eb76e0Sbellard     uint8_t *ptr;
199013eb76e0Sbellard     uint32_t val;
19912e12669aSbellard     target_phys_addr_t page;
19922e12669aSbellard     unsigned long pd;
199392e873b9Sbellard     PhysPageDesc *p;
199413eb76e0Sbellard 
199513eb76e0Sbellard     while (len > 0) {
199613eb76e0Sbellard         page = addr & TARGET_PAGE_MASK;
199713eb76e0Sbellard         l = (page + TARGET_PAGE_SIZE) - addr;
199813eb76e0Sbellard         if (l > len)
199913eb76e0Sbellard             l = len;
200092e873b9Sbellard         p = phys_page_find(page >> TARGET_PAGE_BITS);
200113eb76e0Sbellard         if (!p) {
200213eb76e0Sbellard             pd = IO_MEM_UNASSIGNED;
200313eb76e0Sbellard         } else {
200413eb76e0Sbellard             pd = p->phys_offset;
200513eb76e0Sbellard         }
200613eb76e0Sbellard 
200713eb76e0Sbellard         if (is_write) {
20083a7d929eSbellard             if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
200913eb76e0Sbellard                 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
20106a00d601Sbellard                 /* XXX: could force cpu_single_env to NULL to avoid
20116a00d601Sbellard                    potential bugs */
201213eb76e0Sbellard                 if (l >= 4 && ((addr & 3) == 0)) {
20131c213d19Sbellard                     /* 32 bit write access */
2014c27004ecSbellard                     val = ldl_p(buf);
2015a4193c8aSbellard                     io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
201613eb76e0Sbellard                     l = 4;
201713eb76e0Sbellard                 } else if (l >= 2 && ((addr & 1) == 0)) {
20181c213d19Sbellard                     /* 16 bit write access */
2019c27004ecSbellard                     val = lduw_p(buf);
2020a4193c8aSbellard                     io_mem_write[io_index][1](io_mem_opaque[io_index], addr, val);
202113eb76e0Sbellard                     l = 2;
202213eb76e0Sbellard                 } else {
20231c213d19Sbellard                     /* 8 bit write access */
2024c27004ecSbellard                     val = ldub_p(buf);
2025a4193c8aSbellard                     io_mem_write[io_index][0](io_mem_opaque[io_index], addr, val);
202613eb76e0Sbellard                     l = 1;
202713eb76e0Sbellard                 }
202813eb76e0Sbellard             } else {
2029b448f2f3Sbellard                 unsigned long addr1;
2030b448f2f3Sbellard                 addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
203113eb76e0Sbellard                 /* RAM case */
2032b448f2f3Sbellard                 ptr = phys_ram_base + addr1;
203313eb76e0Sbellard                 memcpy(ptr, buf, l);
20343a7d929eSbellard                 if (!cpu_physical_memory_is_dirty(addr1)) {
2035b448f2f3Sbellard                     /* invalidate code */
2036b448f2f3Sbellard                     tb_invalidate_phys_page_range(addr1, addr1 + l, 0);
2037b448f2f3Sbellard                     /* set dirty bit */
2038f23db169Sbellard                     phys_ram_dirty[addr1 >> TARGET_PAGE_BITS] |=
2039f23db169Sbellard                         (0xff & ~CODE_DIRTY_FLAG);
204013eb76e0Sbellard                 }
20413a7d929eSbellard             }
204213eb76e0Sbellard         } else {
20433a7d929eSbellard             if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM) {
204413eb76e0Sbellard                 /* I/O case */
204513eb76e0Sbellard                 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
204613eb76e0Sbellard                 if (l >= 4 && ((addr & 3) == 0)) {
204713eb76e0Sbellard                     /* 32 bit read access */
2048a4193c8aSbellard                     val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr);
2049c27004ecSbellard                     stl_p(buf, val);
205013eb76e0Sbellard                     l = 4;
205113eb76e0Sbellard                 } else if (l >= 2 && ((addr & 1) == 0)) {
205213eb76e0Sbellard                     /* 16 bit read access */
2053a4193c8aSbellard                     val = io_mem_read[io_index][1](io_mem_opaque[io_index], addr);
2054c27004ecSbellard                     stw_p(buf, val);
205513eb76e0Sbellard                     l = 2;
205613eb76e0Sbellard                 } else {
20571c213d19Sbellard                     /* 8 bit read access */
2058a4193c8aSbellard                     val = io_mem_read[io_index][0](io_mem_opaque[io_index], addr);
2059c27004ecSbellard                     stb_p(buf, val);
206013eb76e0Sbellard                     l = 1;
206113eb76e0Sbellard                 }
206213eb76e0Sbellard             } else {
206313eb76e0Sbellard                 /* RAM case */
206413eb76e0Sbellard                 ptr = phys_ram_base + (pd & TARGET_PAGE_MASK) +
206513eb76e0Sbellard                     (addr & ~TARGET_PAGE_MASK);
206613eb76e0Sbellard                 memcpy(buf, ptr, l);
206713eb76e0Sbellard             }
206813eb76e0Sbellard         }
206913eb76e0Sbellard         len -= l;
207013eb76e0Sbellard         buf += l;
207113eb76e0Sbellard         addr += l;
207213eb76e0Sbellard     }
207313eb76e0Sbellard }
20748df1cd07Sbellard 
20758df1cd07Sbellard /* warning: addr must be aligned */
20768df1cd07Sbellard uint32_t ldl_phys(target_phys_addr_t addr)
20778df1cd07Sbellard {
20788df1cd07Sbellard     int io_index;
20798df1cd07Sbellard     uint8_t *ptr;
20808df1cd07Sbellard     uint32_t val;
20818df1cd07Sbellard     unsigned long pd;
20828df1cd07Sbellard     PhysPageDesc *p;
20838df1cd07Sbellard 
20848df1cd07Sbellard     p = phys_page_find(addr >> TARGET_PAGE_BITS);
20858df1cd07Sbellard     if (!p) {
20868df1cd07Sbellard         pd = IO_MEM_UNASSIGNED;
20878df1cd07Sbellard     } else {
20888df1cd07Sbellard         pd = p->phys_offset;
20898df1cd07Sbellard     }
20908df1cd07Sbellard 
20913a7d929eSbellard     if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM) {
20928df1cd07Sbellard         /* I/O case */
20938df1cd07Sbellard         io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
20948df1cd07Sbellard         val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr);
20958df1cd07Sbellard     } else {
20968df1cd07Sbellard         /* RAM case */
20978df1cd07Sbellard         ptr = phys_ram_base + (pd & TARGET_PAGE_MASK) +
20988df1cd07Sbellard             (addr & ~TARGET_PAGE_MASK);
20998df1cd07Sbellard         val = ldl_p(ptr);
21008df1cd07Sbellard     }
21018df1cd07Sbellard     return val;
21028df1cd07Sbellard }
21038df1cd07Sbellard 
210484b7b8e7Sbellard /* warning: addr must be aligned */
210584b7b8e7Sbellard uint64_t ldq_phys(target_phys_addr_t addr)
210684b7b8e7Sbellard {
210784b7b8e7Sbellard     int io_index;
210884b7b8e7Sbellard     uint8_t *ptr;
210984b7b8e7Sbellard     uint64_t val;
211084b7b8e7Sbellard     unsigned long pd;
211184b7b8e7Sbellard     PhysPageDesc *p;
211284b7b8e7Sbellard 
211384b7b8e7Sbellard     p = phys_page_find(addr >> TARGET_PAGE_BITS);
211484b7b8e7Sbellard     if (!p) {
211584b7b8e7Sbellard         pd = IO_MEM_UNASSIGNED;
211684b7b8e7Sbellard     } else {
211784b7b8e7Sbellard         pd = p->phys_offset;
211884b7b8e7Sbellard     }
211984b7b8e7Sbellard 
212084b7b8e7Sbellard     if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM) {
212184b7b8e7Sbellard         /* I/O case */
212284b7b8e7Sbellard         io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
212384b7b8e7Sbellard #ifdef TARGET_WORDS_BIGENDIAN
212484b7b8e7Sbellard         val = (uint64_t)io_mem_read[io_index][2](io_mem_opaque[io_index], addr) << 32;
212584b7b8e7Sbellard         val |= io_mem_read[io_index][2](io_mem_opaque[io_index], addr + 4);
212684b7b8e7Sbellard #else
212784b7b8e7Sbellard         val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr);
212884b7b8e7Sbellard         val |= (uint64_t)io_mem_read[io_index][2](io_mem_opaque[io_index], addr + 4) << 32;
212984b7b8e7Sbellard #endif
213084b7b8e7Sbellard     } else {
213184b7b8e7Sbellard         /* RAM case */
213284b7b8e7Sbellard         ptr = phys_ram_base + (pd & TARGET_PAGE_MASK) +
213384b7b8e7Sbellard             (addr & ~TARGET_PAGE_MASK);
213484b7b8e7Sbellard         val = ldq_p(ptr);
213584b7b8e7Sbellard     }
213684b7b8e7Sbellard     return val;
213784b7b8e7Sbellard }
213884b7b8e7Sbellard 
2139aab33094Sbellard /* XXX: optimize */
2140aab33094Sbellard uint32_t ldub_phys(target_phys_addr_t addr)
2141aab33094Sbellard {
2142aab33094Sbellard     uint8_t val;
2143aab33094Sbellard     cpu_physical_memory_read(addr, &val, 1);
2144aab33094Sbellard     return val;
2145aab33094Sbellard }
2146aab33094Sbellard 
2147aab33094Sbellard /* XXX: optimize */
2148aab33094Sbellard uint32_t lduw_phys(target_phys_addr_t addr)
2149aab33094Sbellard {
2150aab33094Sbellard     uint16_t val;
2151aab33094Sbellard     cpu_physical_memory_read(addr, (uint8_t *)&val, 2);
2152aab33094Sbellard     return tswap16(val);
2153aab33094Sbellard }
2154aab33094Sbellard 
21558df1cd07Sbellard /* warning: addr must be aligned. The ram page is not masked as dirty
21568df1cd07Sbellard    and the code inside is not invalidated. It is useful if the dirty
21578df1cd07Sbellard    bits are used to track modified PTEs */
21588df1cd07Sbellard void stl_phys_notdirty(target_phys_addr_t addr, uint32_t val)
21598df1cd07Sbellard {
21608df1cd07Sbellard     int io_index;
21618df1cd07Sbellard     uint8_t *ptr;
21628df1cd07Sbellard     unsigned long pd;
21638df1cd07Sbellard     PhysPageDesc *p;
21648df1cd07Sbellard 
21658df1cd07Sbellard     p = phys_page_find(addr >> TARGET_PAGE_BITS);
21668df1cd07Sbellard     if (!p) {
21678df1cd07Sbellard         pd = IO_MEM_UNASSIGNED;
21688df1cd07Sbellard     } else {
21698df1cd07Sbellard         pd = p->phys_offset;
21708df1cd07Sbellard     }
21718df1cd07Sbellard 
21723a7d929eSbellard     if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
21738df1cd07Sbellard         io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
21748df1cd07Sbellard         io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
21758df1cd07Sbellard     } else {
21768df1cd07Sbellard         ptr = phys_ram_base + (pd & TARGET_PAGE_MASK) +
21778df1cd07Sbellard             (addr & ~TARGET_PAGE_MASK);
21788df1cd07Sbellard         stl_p(ptr, val);
21798df1cd07Sbellard     }
21808df1cd07Sbellard }
21818df1cd07Sbellard 
21828df1cd07Sbellard /* warning: addr must be aligned */
21838df1cd07Sbellard void stl_phys(target_phys_addr_t addr, uint32_t val)
21848df1cd07Sbellard {
21858df1cd07Sbellard     int io_index;
21868df1cd07Sbellard     uint8_t *ptr;
21878df1cd07Sbellard     unsigned long pd;
21888df1cd07Sbellard     PhysPageDesc *p;
21898df1cd07Sbellard 
21908df1cd07Sbellard     p = phys_page_find(addr >> TARGET_PAGE_BITS);
21918df1cd07Sbellard     if (!p) {
21928df1cd07Sbellard         pd = IO_MEM_UNASSIGNED;
21938df1cd07Sbellard     } else {
21948df1cd07Sbellard         pd = p->phys_offset;
21958df1cd07Sbellard     }
21968df1cd07Sbellard 
21973a7d929eSbellard     if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
21988df1cd07Sbellard         io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
21998df1cd07Sbellard         io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
22008df1cd07Sbellard     } else {
22018df1cd07Sbellard         unsigned long addr1;
22028df1cd07Sbellard         addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
22038df1cd07Sbellard         /* RAM case */
22048df1cd07Sbellard         ptr = phys_ram_base + addr1;
22058df1cd07Sbellard         stl_p(ptr, val);
22063a7d929eSbellard         if (!cpu_physical_memory_is_dirty(addr1)) {
22078df1cd07Sbellard             /* invalidate code */
22088df1cd07Sbellard             tb_invalidate_phys_page_range(addr1, addr1 + 4, 0);
22098df1cd07Sbellard             /* set dirty bit */
2210f23db169Sbellard             phys_ram_dirty[addr1 >> TARGET_PAGE_BITS] |=
2211f23db169Sbellard                 (0xff & ~CODE_DIRTY_FLAG);
22128df1cd07Sbellard         }
22138df1cd07Sbellard     }
22143a7d929eSbellard }
22158df1cd07Sbellard 
2216aab33094Sbellard /* XXX: optimize */
2217aab33094Sbellard void stb_phys(target_phys_addr_t addr, uint32_t val)
2218aab33094Sbellard {
2219aab33094Sbellard     uint8_t v = val;
2220aab33094Sbellard     cpu_physical_memory_write(addr, &v, 1);
2221aab33094Sbellard }
2222aab33094Sbellard 
2223aab33094Sbellard /* XXX: optimize */
2224aab33094Sbellard void stw_phys(target_phys_addr_t addr, uint32_t val)
2225aab33094Sbellard {
2226aab33094Sbellard     uint16_t v = tswap16(val);
2227aab33094Sbellard     cpu_physical_memory_write(addr, (const uint8_t *)&v, 2);
2228aab33094Sbellard }
2229aab33094Sbellard 
2230aab33094Sbellard /* XXX: optimize */
2231aab33094Sbellard void stq_phys(target_phys_addr_t addr, uint64_t val)
2232aab33094Sbellard {
2233aab33094Sbellard     val = tswap64(val);
2234aab33094Sbellard     cpu_physical_memory_write(addr, (const uint8_t *)&val, 8);
2235aab33094Sbellard }
2236aab33094Sbellard 
223713eb76e0Sbellard #endif
223813eb76e0Sbellard 
223913eb76e0Sbellard /* virtual memory access for debug */
2240b448f2f3Sbellard int cpu_memory_rw_debug(CPUState *env, target_ulong addr,
2241b448f2f3Sbellard                         uint8_t *buf, int len, int is_write)
224213eb76e0Sbellard {
224313eb76e0Sbellard     int l;
224413eb76e0Sbellard     target_ulong page, phys_addr;
224513eb76e0Sbellard 
224613eb76e0Sbellard     while (len > 0) {
224713eb76e0Sbellard         page = addr & TARGET_PAGE_MASK;
224813eb76e0Sbellard         phys_addr = cpu_get_phys_page_debug(env, page);
224913eb76e0Sbellard         /* if no physical page mapped, return an error */
225013eb76e0Sbellard         if (phys_addr == -1)
225113eb76e0Sbellard             return -1;
225213eb76e0Sbellard         l = (page + TARGET_PAGE_SIZE) - addr;
225313eb76e0Sbellard         if (l > len)
225413eb76e0Sbellard             l = len;
2255b448f2f3Sbellard         cpu_physical_memory_rw(phys_addr + (addr & ~TARGET_PAGE_MASK),
2256b448f2f3Sbellard                                buf, l, is_write);
225713eb76e0Sbellard         len -= l;
225813eb76e0Sbellard         buf += l;
225913eb76e0Sbellard         addr += l;
226013eb76e0Sbellard     }
226113eb76e0Sbellard     return 0;
226213eb76e0Sbellard }
226313eb76e0Sbellard 
2264e3db7226Sbellard void dump_exec_info(FILE *f,
2265e3db7226Sbellard                     int (*cpu_fprintf)(FILE *f, const char *fmt, ...))
2266e3db7226Sbellard {
2267e3db7226Sbellard     int i, target_code_size, max_target_code_size;
2268e3db7226Sbellard     int direct_jmp_count, direct_jmp2_count, cross_page;
2269e3db7226Sbellard     TranslationBlock *tb;
2270e3db7226Sbellard 
2271e3db7226Sbellard     target_code_size = 0;
2272e3db7226Sbellard     max_target_code_size = 0;
2273e3db7226Sbellard     cross_page = 0;
2274e3db7226Sbellard     direct_jmp_count = 0;
2275e3db7226Sbellard     direct_jmp2_count = 0;
2276e3db7226Sbellard     for(i = 0; i < nb_tbs; i++) {
2277e3db7226Sbellard         tb = &tbs[i];
2278e3db7226Sbellard         target_code_size += tb->size;
2279e3db7226Sbellard         if (tb->size > max_target_code_size)
2280e3db7226Sbellard             max_target_code_size = tb->size;
2281e3db7226Sbellard         if (tb->page_addr[1] != -1)
2282e3db7226Sbellard             cross_page++;
2283e3db7226Sbellard         if (tb->tb_next_offset[0] != 0xffff) {
2284e3db7226Sbellard             direct_jmp_count++;
2285e3db7226Sbellard             if (tb->tb_next_offset[1] != 0xffff) {
2286e3db7226Sbellard                 direct_jmp2_count++;
2287e3db7226Sbellard             }
2288e3db7226Sbellard         }
2289e3db7226Sbellard     }
2290e3db7226Sbellard     /* XXX: avoid using doubles ? */
2291e3db7226Sbellard     cpu_fprintf(f, "TB count            %d\n", nb_tbs);
2292e3db7226Sbellard     cpu_fprintf(f, "TB avg target size  %d max=%d bytes\n",
2293e3db7226Sbellard                 nb_tbs ? target_code_size / nb_tbs : 0,
2294e3db7226Sbellard                 max_target_code_size);
2295e3db7226Sbellard     cpu_fprintf(f, "TB avg host size    %d bytes (expansion ratio: %0.1f)\n",
2296e3db7226Sbellard                 nb_tbs ? (code_gen_ptr - code_gen_buffer) / nb_tbs : 0,
2297e3db7226Sbellard                 target_code_size ? (double) (code_gen_ptr - code_gen_buffer) / target_code_size : 0);
2298e3db7226Sbellard     cpu_fprintf(f, "cross page TB count %d (%d%%)\n",
2299e3db7226Sbellard             cross_page,
2300e3db7226Sbellard             nb_tbs ? (cross_page * 100) / nb_tbs : 0);
2301e3db7226Sbellard     cpu_fprintf(f, "direct jump count   %d (%d%%) (2 jumps=%d %d%%)\n",
2302e3db7226Sbellard                 direct_jmp_count,
2303e3db7226Sbellard                 nb_tbs ? (direct_jmp_count * 100) / nb_tbs : 0,
2304e3db7226Sbellard                 direct_jmp2_count,
2305e3db7226Sbellard                 nb_tbs ? (direct_jmp2_count * 100) / nb_tbs : 0);
2306e3db7226Sbellard     cpu_fprintf(f, "TB flush count      %d\n", tb_flush_count);
2307e3db7226Sbellard     cpu_fprintf(f, "TB invalidate count %d\n", tb_phys_invalidate_count);
2308e3db7226Sbellard     cpu_fprintf(f, "TLB flush count     %d\n", tlb_flush_count);
2309e3db7226Sbellard }
2310e3db7226Sbellard 
231161382a50Sbellard #if !defined(CONFIG_USER_ONLY)
231261382a50Sbellard 
231361382a50Sbellard #define MMUSUFFIX _cmmu
231461382a50Sbellard #define GETPC() NULL
231561382a50Sbellard #define env cpu_single_env
2316b769d8feSbellard #define SOFTMMU_CODE_ACCESS
231761382a50Sbellard 
231861382a50Sbellard #define SHIFT 0
231961382a50Sbellard #include "softmmu_template.h"
232061382a50Sbellard 
232161382a50Sbellard #define SHIFT 1
232261382a50Sbellard #include "softmmu_template.h"
232361382a50Sbellard 
232461382a50Sbellard #define SHIFT 2
232561382a50Sbellard #include "softmmu_template.h"
232661382a50Sbellard 
232761382a50Sbellard #define SHIFT 3
232861382a50Sbellard #include "softmmu_template.h"
232961382a50Sbellard 
233061382a50Sbellard #undef env
233161382a50Sbellard 
233261382a50Sbellard #endif
2333