xref: /qemu/system/physmem.c (revision e3f4e2a4b0df510e441badb85c9398516c27bd66)
154936004Sbellard /*
2fd6ce8f6Sbellard  *  virtual page mapping and translated block handling
354936004Sbellard  *
454936004Sbellard  *  Copyright (c) 2003 Fabrice Bellard
554936004Sbellard  *
654936004Sbellard  * This library is free software; you can redistribute it and/or
754936004Sbellard  * modify it under the terms of the GNU Lesser General Public
854936004Sbellard  * License as published by the Free Software Foundation; either
954936004Sbellard  * version 2 of the License, or (at your option) any later version.
1054936004Sbellard  *
1154936004Sbellard  * This library is distributed in the hope that it will be useful,
1254936004Sbellard  * but WITHOUT ANY WARRANTY; without even the implied warranty of
1354936004Sbellard  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
1454936004Sbellard  * Lesser General Public License for more details.
1554936004Sbellard  *
1654936004Sbellard  * You should have received a copy of the GNU Lesser General Public
1754936004Sbellard  * License along with this library; if not, write to the Free Software
1854936004Sbellard  * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
1954936004Sbellard  */
2067b915a5Sbellard #include "config.h"
21d5a8f07cSbellard #ifdef _WIN32
22d5a8f07cSbellard #include <windows.h>
23d5a8f07cSbellard #else
24a98d49b1Sbellard #include <sys/types.h>
25d5a8f07cSbellard #include <sys/mman.h>
26d5a8f07cSbellard #endif
2754936004Sbellard #include <stdlib.h>
2854936004Sbellard #include <stdio.h>
2954936004Sbellard #include <stdarg.h>
3054936004Sbellard #include <string.h>
3154936004Sbellard #include <errno.h>
3254936004Sbellard #include <unistd.h>
3354936004Sbellard #include <inttypes.h>
3454936004Sbellard 
356180a181Sbellard #include "cpu.h"
366180a181Sbellard #include "exec-all.h"
3753a5960aSpbrook #if defined(CONFIG_USER_ONLY)
3853a5960aSpbrook #include <qemu.h>
3953a5960aSpbrook #endif
4054936004Sbellard 
41fd6ce8f6Sbellard //#define DEBUG_TB_INVALIDATE
4266e85a21Sbellard //#define DEBUG_FLUSH
439fa3e853Sbellard //#define DEBUG_TLB
44fd6ce8f6Sbellard 
45fd6ce8f6Sbellard /* make various TB consistency checks */
46fd6ce8f6Sbellard //#define DEBUG_TB_CHECK
4798857888Sbellard //#define DEBUG_TLB_CHECK
48fd6ce8f6Sbellard 
49fd6ce8f6Sbellard /* threshold to flush the translated code buffer */
50fd6ce8f6Sbellard #define CODE_GEN_BUFFER_MAX_SIZE (CODE_GEN_BUFFER_SIZE - CODE_GEN_MAX_SIZE)
51fd6ce8f6Sbellard 
529fa3e853Sbellard #define SMC_BITMAP_USE_THRESHOLD 10
539fa3e853Sbellard 
549fa3e853Sbellard #define MMAP_AREA_START        0x00000000
559fa3e853Sbellard #define MMAP_AREA_END          0xa8000000
56fd6ce8f6Sbellard 
57108c49b8Sbellard #if defined(TARGET_SPARC64)
58108c49b8Sbellard #define TARGET_PHYS_ADDR_SPACE_BITS 41
59108c49b8Sbellard #elif defined(TARGET_PPC64)
60108c49b8Sbellard #define TARGET_PHYS_ADDR_SPACE_BITS 42
61108c49b8Sbellard #else
62108c49b8Sbellard /* Note: for compatibility with kqemu, we use 32 bits for x86_64 */
63108c49b8Sbellard #define TARGET_PHYS_ADDR_SPACE_BITS 32
64108c49b8Sbellard #endif
65108c49b8Sbellard 
66fd6ce8f6Sbellard TranslationBlock tbs[CODE_GEN_MAX_BLOCKS];
679fa3e853Sbellard TranslationBlock *tb_phys_hash[CODE_GEN_PHYS_HASH_SIZE];
68fd6ce8f6Sbellard int nb_tbs;
69eb51d102Sbellard /* any access to the tbs or the page table must use this lock */
70eb51d102Sbellard spinlock_t tb_lock = SPIN_LOCK_UNLOCKED;
71fd6ce8f6Sbellard 
72b8076a74Sbellard uint8_t code_gen_buffer[CODE_GEN_BUFFER_SIZE] __attribute__((aligned (32)));
73fd6ce8f6Sbellard uint8_t *code_gen_ptr;
74fd6ce8f6Sbellard 
759fa3e853Sbellard int phys_ram_size;
769fa3e853Sbellard int phys_ram_fd;
779fa3e853Sbellard uint8_t *phys_ram_base;
781ccde1cbSbellard uint8_t *phys_ram_dirty;
799fa3e853Sbellard 
806a00d601Sbellard CPUState *first_cpu;
816a00d601Sbellard /* current CPU in the current thread. It is only valid inside
826a00d601Sbellard    cpu_exec() */
836a00d601Sbellard CPUState *cpu_single_env;
846a00d601Sbellard 
/* Per-target-page bookkeeping for the translator. */
typedef struct PageDesc {
    /* list of TBs intersecting this ram page (low 2 bits of each link
       tag which of the TB's two pages this entry is for) */
    TranslationBlock *first_tb;
    /* in order to optimize self modifying code, we count the number
       of lookups we do to a given page to use a bitmap */
    unsigned int code_write_count;
    /* one bit per byte of the page; set where translated code lives
       (built lazily by build_page_bitmap) */
    uint8_t *code_bitmap;
#if defined(CONFIG_USER_ONLY)
    /* page protection flags (user-mode emulation only) */
    unsigned long flags;
#endif
} PageDesc;
9654936004Sbellard 
/* Per-physical-page descriptor in the l1_phys_map tables. */
typedef struct PhysPageDesc {
    /* offset in host memory of the page + io_index in the low 12 bits */
    uint32_t phys_offset;
} PhysPageDesc;
10192e873b9Sbellard 
10254936004Sbellard #define L2_BITS 10
10354936004Sbellard #define L1_BITS (32 - L2_BITS - TARGET_PAGE_BITS)
10454936004Sbellard 
10554936004Sbellard #define L1_SIZE (1 << L1_BITS)
10654936004Sbellard #define L2_SIZE (1 << L2_BITS)
10754936004Sbellard 
10833417e70Sbellard static void io_mem_init(void);
109fd6ce8f6Sbellard 
11083fb7adfSbellard unsigned long qemu_real_host_page_size;
11183fb7adfSbellard unsigned long qemu_host_page_bits;
11283fb7adfSbellard unsigned long qemu_host_page_size;
11383fb7adfSbellard unsigned long qemu_host_page_mask;
11454936004Sbellard 
11592e873b9Sbellard /* XXX: for system emulation, it could just be an array */
11654936004Sbellard static PageDesc *l1_map[L1_SIZE];
1170a962c02Sbellard PhysPageDesc **l1_phys_map;
11854936004Sbellard 
11933417e70Sbellard /* io memory support */
12033417e70Sbellard CPUWriteMemoryFunc *io_mem_write[IO_MEM_NB_ENTRIES][4];
12133417e70Sbellard CPUReadMemoryFunc *io_mem_read[IO_MEM_NB_ENTRIES][4];
122a4193c8aSbellard void *io_mem_opaque[IO_MEM_NB_ENTRIES];
12333417e70Sbellard static int io_mem_nb;
12433417e70Sbellard 
12534865134Sbellard /* log support */
12634865134Sbellard char *logfilename = "/tmp/qemu.log";
12734865134Sbellard FILE *logfile;
12834865134Sbellard int loglevel;
12934865134Sbellard 
130e3db7226Sbellard /* statistics */
131e3db7226Sbellard static int tlb_flush_count;
132e3db7226Sbellard static int tb_flush_count;
133e3db7226Sbellard static int tb_phys_invalidate_count;
134e3db7226Sbellard 
/* One-time page-size setup: determine the real host page size, make the
   static code_gen_buffer executable, derive the qemu_host_page_* globals
   and allocate the top level of the physical page table. */
static void page_init(void)
{
    /* NOTE: we can always suppose that qemu_host_page_size >=
       TARGET_PAGE_SIZE */
#ifdef _WIN32
    {
        SYSTEM_INFO system_info;
        DWORD old_protect;

        GetSystemInfo(&system_info);
        qemu_real_host_page_size = system_info.dwPageSize;

        /* generated code is executed straight out of code_gen_buffer,
           so the buffer must be both writable and executable */
        VirtualProtect(code_gen_buffer, sizeof(code_gen_buffer),
                       PAGE_EXECUTE_READWRITE, &old_protect);
    }
#else
    qemu_real_host_page_size = getpagesize();
    {
        unsigned long start, end;

        /* round the buffer bounds out to host page boundaries, since
           mprotect operates on whole pages */
        start = (unsigned long)code_gen_buffer;
        start &= ~(qemu_real_host_page_size - 1);

        end = (unsigned long)code_gen_buffer + sizeof(code_gen_buffer);
        end += qemu_real_host_page_size - 1;
        end &= ~(qemu_real_host_page_size - 1);

        mprotect((void *)start, end - start,
                 PROT_READ | PROT_WRITE | PROT_EXEC);
    }
#endif

    /* default qemu_host_page_size to the real host page size if it was
       not set elsewhere, and clamp it to at least the target page size */
    if (qemu_host_page_size == 0)
        qemu_host_page_size = qemu_real_host_page_size;
    if (qemu_host_page_size < TARGET_PAGE_SIZE)
        qemu_host_page_size = TARGET_PAGE_SIZE;
    /* derive log2 and mask from the (power-of-two) host page size */
    qemu_host_page_bits = 0;
    while ((1 << qemu_host_page_bits) < qemu_host_page_size)
        qemu_host_page_bits++;
    qemu_host_page_mask = ~(qemu_host_page_size - 1);
    /* top level of the physical page table; lower levels are allocated
       on demand by phys_page_find_alloc() */
    l1_phys_map = qemu_vmalloc(L1_SIZE * sizeof(void *));
    memset(l1_phys_map, 0, L1_SIZE * sizeof(void *));
}
17854936004Sbellard 
179fd6ce8f6Sbellard static inline PageDesc *page_find_alloc(unsigned int index)
18054936004Sbellard {
18154936004Sbellard     PageDesc **lp, *p;
18254936004Sbellard 
18354936004Sbellard     lp = &l1_map[index >> L2_BITS];
18454936004Sbellard     p = *lp;
18554936004Sbellard     if (!p) {
18654936004Sbellard         /* allocate if not found */
18759817ccbSbellard         p = qemu_malloc(sizeof(PageDesc) * L2_SIZE);
188fd6ce8f6Sbellard         memset(p, 0, sizeof(PageDesc) * L2_SIZE);
18954936004Sbellard         *lp = p;
19054936004Sbellard     }
19154936004Sbellard     return p + (index & (L2_SIZE - 1));
19254936004Sbellard }
19354936004Sbellard 
194fd6ce8f6Sbellard static inline PageDesc *page_find(unsigned int index)
19554936004Sbellard {
19654936004Sbellard     PageDesc *p;
19754936004Sbellard 
19854936004Sbellard     p = l1_map[index >> L2_BITS];
19954936004Sbellard     if (!p)
20054936004Sbellard         return 0;
201fd6ce8f6Sbellard     return p + (index & (L2_SIZE - 1));
20254936004Sbellard }
20354936004Sbellard 
/* Return the PhysPageDesc for physical page 'index', walking the one- or
   two-level l1_phys_map table.  Missing levels are created when 'alloc'
   is non-zero; otherwise NULL is returned for an absent entry. */
static PhysPageDesc *phys_page_find_alloc(target_phys_addr_t index, int alloc)
{
    void **lp, **p;
    PhysPageDesc *pd;

    p = (void **)l1_phys_map;
#if TARGET_PHYS_ADDR_SPACE_BITS > 32

#if TARGET_PHYS_ADDR_SPACE_BITS > (32 + L1_BITS)
#error unsupported TARGET_PHYS_ADDR_SPACE_BITS
#endif
    /* wide physical addresses: extra top level indexed by the bits
       above the 32-bit range */
    lp = p + ((index >> (L1_BITS + L2_BITS)) & (L1_SIZE - 1));
    p = *lp;
    if (!p) {
        /* allocate if not found */
        if (!alloc)
            return NULL;
        p = qemu_vmalloc(sizeof(void *) * L1_SIZE);
        memset(p, 0, sizeof(void *) * L1_SIZE);
        *lp = p;
    }
#endif
    lp = p + ((index >> L2_BITS) & (L1_SIZE - 1));
    pd = *lp;
    if (!pd) {
        int i;
        /* allocate if not found */
        if (!alloc)
            return NULL;
        pd = qemu_vmalloc(sizeof(PhysPageDesc) * L2_SIZE);
        *lp = pd;
        /* fresh entries are explicitly unassigned rather than zeroed,
           so unmapped physical pages hit the IO_MEM_UNASSIGNED path */
        for (i = 0; i < L2_SIZE; i++)
          pd[i].phys_offset = IO_MEM_UNASSIGNED;
    }
    return ((PhysPageDesc *)pd) + (index & (L2_SIZE - 1));
}
24092e873b9Sbellard 
241108c49b8Sbellard static inline PhysPageDesc *phys_page_find(target_phys_addr_t index)
24292e873b9Sbellard {
243108c49b8Sbellard     return phys_page_find_alloc(index, 0);
24492e873b9Sbellard }
24592e873b9Sbellard 
2469fa3e853Sbellard #if !defined(CONFIG_USER_ONLY)
2476a00d601Sbellard static void tlb_protect_code(ram_addr_t ram_addr);
2483a7d929eSbellard static void tlb_unprotect_code_phys(CPUState *env, ram_addr_t ram_addr,
2493a7d929eSbellard                                     target_ulong vaddr);
2509fa3e853Sbellard #endif
251fd6ce8f6Sbellard 
2526a00d601Sbellard void cpu_exec_init(CPUState *env)
253fd6ce8f6Sbellard {
2546a00d601Sbellard     CPUState **penv;
2556a00d601Sbellard     int cpu_index;
2566a00d601Sbellard 
257fd6ce8f6Sbellard     if (!code_gen_ptr) {
258fd6ce8f6Sbellard         code_gen_ptr = code_gen_buffer;
259b346ff46Sbellard         page_init();
26033417e70Sbellard         io_mem_init();
261fd6ce8f6Sbellard     }
2626a00d601Sbellard     env->next_cpu = NULL;
2636a00d601Sbellard     penv = &first_cpu;
2646a00d601Sbellard     cpu_index = 0;
2656a00d601Sbellard     while (*penv != NULL) {
2666a00d601Sbellard         penv = (CPUState **)&(*penv)->next_cpu;
2676a00d601Sbellard         cpu_index++;
2686a00d601Sbellard     }
2696a00d601Sbellard     env->cpu_index = cpu_index;
2706a00d601Sbellard     *penv = env;
271fd6ce8f6Sbellard }
272fd6ce8f6Sbellard 
2739fa3e853Sbellard static inline void invalidate_page_bitmap(PageDesc *p)
2749fa3e853Sbellard {
2759fa3e853Sbellard     if (p->code_bitmap) {
27659817ccbSbellard         qemu_free(p->code_bitmap);
2779fa3e853Sbellard         p->code_bitmap = NULL;
2789fa3e853Sbellard     }
2799fa3e853Sbellard     p->code_write_count = 0;
2809fa3e853Sbellard }
2819fa3e853Sbellard 
282fd6ce8f6Sbellard /* set to NULL all the 'first_tb' fields in all PageDescs */
283fd6ce8f6Sbellard static void page_flush_tb(void)
284fd6ce8f6Sbellard {
285fd6ce8f6Sbellard     int i, j;
286fd6ce8f6Sbellard     PageDesc *p;
287fd6ce8f6Sbellard 
288fd6ce8f6Sbellard     for(i = 0; i < L1_SIZE; i++) {
289fd6ce8f6Sbellard         p = l1_map[i];
290fd6ce8f6Sbellard         if (p) {
2919fa3e853Sbellard             for(j = 0; j < L2_SIZE; j++) {
2929fa3e853Sbellard                 p->first_tb = NULL;
2939fa3e853Sbellard                 invalidate_page_bitmap(p);
2949fa3e853Sbellard                 p++;
2959fa3e853Sbellard             }
296fd6ce8f6Sbellard         }
297fd6ce8f6Sbellard     }
298fd6ce8f6Sbellard }
299fd6ce8f6Sbellard 
/* flush all the translation blocks */
/* XXX: tb_flush is currently not thread safe */
void tb_flush(CPUState *env1)
{
    /* NOTE(review): env1 is not referenced below; the flush walks the
       global CPU list instead -- presumably kept for API symmetry. */
    CPUState *env;
#if defined(DEBUG_FLUSH)
    printf("qemu: flush code_size=%d nb_tbs=%d avg_tb_size=%d\n",
           code_gen_ptr - code_gen_buffer,
           nb_tbs,
           nb_tbs > 0 ? (code_gen_ptr - code_gen_buffer) / nb_tbs : 0);
#endif
    /* forget every translated block */
    nb_tbs = 0;

    /* clear each CPU's virtual-pc -> TB lookup cache */
    for(env = first_cpu; env != NULL; env = env->next_cpu) {
        memset (env->tb_jmp_cache, 0, TB_JMP_CACHE_SIZE * sizeof (void *));
    }

    /* clear the physical-pc hash table and all per-page TB lists */
    memset (tb_phys_hash, 0, CODE_GEN_PHYS_HASH_SIZE * sizeof (void *));
    page_flush_tb();

    /* start reusing the code buffer from the beginning */
    code_gen_ptr = code_gen_buffer;
    /* XXX: flush processor icache at this point if cache flush is
       expensive */
    tb_flush_count++;
}
325fd6ce8f6Sbellard 
326fd6ce8f6Sbellard #ifdef DEBUG_TB_CHECK
327fd6ce8f6Sbellard 
/* Debug helper: report any TB whose [pc, pc+size) range still overlaps
   the page containing 'address' after an invalidation.
   NOTE(review): iterates tb_hash / CODE_GEN_HASH_SIZE, which are not
   defined in this file (the live table here is tb_phys_hash) -- looks
   like stale debug code; verify before enabling DEBUG_TB_CHECK. */
static void tb_invalidate_check(unsigned long address)
{
    TranslationBlock *tb;
    int i;
    address &= TARGET_PAGE_MASK;
    for(i = 0;i < CODE_GEN_HASH_SIZE; i++) {
        for(tb = tb_hash[i]; tb != NULL; tb = tb->hash_next) {
            /* overlap test: TB intersects [address, address+page) */
            if (!(address + TARGET_PAGE_SIZE <= tb->pc ||
                  address >= tb->pc + tb->size)) {
                printf("ERROR invalidate: address=%08lx PC=%08lx size=%04x\n",
                       address, tb->pc, tb->size);
            }
        }
    }
}
343fd6ce8f6Sbellard 
/* verify that all the pages have correct rights for code */
/* NOTE(review): like tb_invalidate_check above, this walks tb_hash /
   CODE_GEN_HASH_SIZE, which are not defined in this file -- stale
   debug-only code; confirm before enabling DEBUG_TB_CHECK. */
static void tb_page_check(void)
{
    TranslationBlock *tb;
    int i, flags1, flags2;

    for(i = 0;i < CODE_GEN_HASH_SIZE; i++) {
        for(tb = tb_hash[i]; tb != NULL; tb = tb->hash_next) {
            /* check both the first and last byte of the TB, since a TB
               may span two pages */
            flags1 = page_get_flags(tb->pc);
            flags2 = page_get_flags(tb->pc + tb->size - 1);
            if ((flags1 & PAGE_WRITE) || (flags2 & PAGE_WRITE)) {
                /* a page holding translated code must not be writable */
                printf("ERROR page flags: PC=%08lx size=%04x f1=%x f2=%x\n",
                       tb->pc, tb->size, flags1, flags2);
            }
        }
    }
}
361fd6ce8f6Sbellard 
/* Debug helper: walk the circular list of TBs that jump into 'tb'.
   Each link carries a tag in its low 2 bits: 0/1 select which jmp_next
   slot to follow, 2 marks the end-of-list entry, which must point back
   at 'tb' itself. */
void tb_jmp_check(TranslationBlock *tb)
{
    TranslationBlock *tb1;
    unsigned int n1;

    /* suppress any remaining jumps to this TB */
    tb1 = tb->jmp_first;
    for(;;) {
        n1 = (long)tb1 & 3;              /* extract the slot tag */
        tb1 = (TranslationBlock *)((long)tb1 & ~3);  /* strip the tag */
        if (n1 == 2)
            break;                       /* reached end-of-list marker */
        tb1 = tb1->jmp_next[n1];
    }
    /* check end of list */
    if (tb1 != tb) {
        printf("ERROR: jmp_list from 0x%08lx\n", (long)tb);
    }
}
381d4e8164fSbellard 
382fd6ce8f6Sbellard #endif
383fd6ce8f6Sbellard 
384fd6ce8f6Sbellard /* invalidate one TB */
385fd6ce8f6Sbellard static inline void tb_remove(TranslationBlock **ptb, TranslationBlock *tb,
386fd6ce8f6Sbellard                              int next_offset)
387fd6ce8f6Sbellard {
388fd6ce8f6Sbellard     TranslationBlock *tb1;
389fd6ce8f6Sbellard     for(;;) {
390fd6ce8f6Sbellard         tb1 = *ptb;
391fd6ce8f6Sbellard         if (tb1 == tb) {
392fd6ce8f6Sbellard             *ptb = *(TranslationBlock **)((char *)tb1 + next_offset);
393fd6ce8f6Sbellard             break;
394fd6ce8f6Sbellard         }
395fd6ce8f6Sbellard         ptb = (TranslationBlock **)((char *)tb1 + next_offset);
396fd6ce8f6Sbellard     }
397fd6ce8f6Sbellard }
398fd6ce8f6Sbellard 
3999fa3e853Sbellard static inline void tb_page_remove(TranslationBlock **ptb, TranslationBlock *tb)
4009fa3e853Sbellard {
4019fa3e853Sbellard     TranslationBlock *tb1;
4029fa3e853Sbellard     unsigned int n1;
4039fa3e853Sbellard 
4049fa3e853Sbellard     for(;;) {
4059fa3e853Sbellard         tb1 = *ptb;
4069fa3e853Sbellard         n1 = (long)tb1 & 3;
4079fa3e853Sbellard         tb1 = (TranslationBlock *)((long)tb1 & ~3);
4089fa3e853Sbellard         if (tb1 == tb) {
4099fa3e853Sbellard             *ptb = tb1->page_next[n1];
4109fa3e853Sbellard             break;
4119fa3e853Sbellard         }
4129fa3e853Sbellard         ptb = &tb1->page_next[n1];
4139fa3e853Sbellard     }
4149fa3e853Sbellard }
4159fa3e853Sbellard 
/* Remove the entry for jump slot 'n' of 'tb' from the circular list of
   TBs that jump into the same target TB.  List links are tagged in the
   low 2 bits (slot 0/1, or 2 for the target TB's own jmp_first head). */
static inline void tb_jmp_remove(TranslationBlock *tb, int n)
{
    TranslationBlock *tb1, **ptb;
    unsigned int n1;

    ptb = &tb->jmp_next[n];
    tb1 = *ptb;
    if (tb1) {
        /* find tb(n) in circular list */
        for(;;) {
            tb1 = *ptb;
            n1 = (long)tb1 & 3;   /* slot tag; 2 = list head entry */
            tb1 = (TranslationBlock *)((long)tb1 & ~3);
            if (n1 == n && tb1 == tb)
                break;
            if (n1 == 2) {
                ptb = &tb1->jmp_first;
            } else {
                ptb = &tb1->jmp_next[n1];
            }
        }
        /* now we can suppress tb(n) from the list */
        *ptb = tb->jmp_next[n];

        tb->jmp_next[n] = NULL;
    }
}
443d4e8164fSbellard 
444d4e8164fSbellard /* reset the jump entry 'n' of a TB so that it is not chained to
445d4e8164fSbellard    another TB */
446d4e8164fSbellard static inline void tb_reset_jump(TranslationBlock *tb, int n)
447d4e8164fSbellard {
448d4e8164fSbellard     tb_set_jmp_target(tb, n, (unsigned long)(tb->tc_ptr + tb->tb_next_offset[n]));
449d4e8164fSbellard }
450d4e8164fSbellard 
/* Unlink 'tb' from every data structure referencing it: the physical
   hash table, the per-page TB lists, each CPU's jump cache, and the
   jump chains in both directions.  'page_addr' identifies a page the
   caller is already handling itself, which is therefore skipped here
   (callers pass -1 to mean "unlink from all pages" -- see the
   tb_phys_invalidate(tb, -1) call below). */
static inline void tb_phys_invalidate(TranslationBlock *tb, unsigned int page_addr)
{
    CPUState *env;
    PageDesc *p;
    unsigned int h, n1;
    target_ulong phys_pc;
    TranslationBlock *tb1, *tb2;

    /* remove the TB from the hash list */
    phys_pc = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
    h = tb_phys_hash_func(phys_pc);
    tb_remove(&tb_phys_hash[h], tb,
              offsetof(TranslationBlock, phys_hash_next));

    /* remove the TB from the page list */
    if (tb->page_addr[0] != page_addr) {
        p = page_find(tb->page_addr[0] >> TARGET_PAGE_BITS);
        tb_page_remove(&p->first_tb, tb);
        invalidate_page_bitmap(p);
    }
    /* second page only exists when the TB spans a page boundary */
    if (tb->page_addr[1] != -1 && tb->page_addr[1] != page_addr) {
        p = page_find(tb->page_addr[1] >> TARGET_PAGE_BITS);
        tb_page_remove(&p->first_tb, tb);
        invalidate_page_bitmap(p);
    }

    tb_invalidated_flag = 1;

    /* remove the TB from the hash list */
    h = tb_jmp_cache_hash_func(tb->pc);
    for(env = first_cpu; env != NULL; env = env->next_cpu) {
        if (env->tb_jmp_cache[h] == tb)
            env->tb_jmp_cache[h] = NULL;
    }

    /* suppress this TB from the two jump lists */
    tb_jmp_remove(tb, 0);
    tb_jmp_remove(tb, 1);

    /* suppress any remaining jumps to this TB */
    tb1 = tb->jmp_first;
    for(;;) {
        n1 = (long)tb1 & 3;   /* low bits tag the jump slot; 2 = list end */
        if (n1 == 2)
            break;
        tb1 = (TranslationBlock *)((long)tb1 & ~3);
        tb2 = tb1->jmp_next[n1];
        /* unchain the incoming jump so tb1 falls back to the epilogue */
        tb_reset_jump(tb1, n1);
        tb1->jmp_next[n1] = NULL;
        tb1 = tb2;
    }
    tb->jmp_first = (TranslationBlock *)((long)tb | 2); /* fail safe */

    tb_phys_invalidate_count++;
}
5069fa3e853Sbellard 
/* Set bits [start, start+len) in the bitmap 'tab' (bit k lives in byte
   k/8 at position k%8).  A zero-length range writes nothing. */
static inline void set_bits(uint8_t *tab, int start, int len)
{
    int end = start + len;
    uint8_t *byte = tab + (start >> 3);
    int head_mask = 0xff << (start & 7);

    if ((start & ~7) == (end & ~7)) {
        /* range begins and ends inside a single byte */
        if (start < end) {
            *byte |= head_mask & ~(0xff << (end & 7));
        }
    } else {
        /* partial leading byte */
        *byte++ |= head_mask;
        /* whole bytes in the middle */
        int full = (end >> 3) - ((start >> 3) + 1);
        while (full-- > 0) {
            *byte++ = 0xff;
        }
        /* partial trailing byte, unless the range ends on a boundary */
        if (end & 7) {
            *byte |= ~(0xff << (end & 7));
        }
    }
}
5339fa3e853Sbellard 
5349fa3e853Sbellard static void build_page_bitmap(PageDesc *p)
5359fa3e853Sbellard {
5369fa3e853Sbellard     int n, tb_start, tb_end;
5379fa3e853Sbellard     TranslationBlock *tb;
5389fa3e853Sbellard 
53959817ccbSbellard     p->code_bitmap = qemu_malloc(TARGET_PAGE_SIZE / 8);
5409fa3e853Sbellard     if (!p->code_bitmap)
5419fa3e853Sbellard         return;
5429fa3e853Sbellard     memset(p->code_bitmap, 0, TARGET_PAGE_SIZE / 8);
5439fa3e853Sbellard 
5449fa3e853Sbellard     tb = p->first_tb;
5459fa3e853Sbellard     while (tb != NULL) {
5469fa3e853Sbellard         n = (long)tb & 3;
5479fa3e853Sbellard         tb = (TranslationBlock *)((long)tb & ~3);
5489fa3e853Sbellard         /* NOTE: this is subtle as a TB may span two physical pages */
5499fa3e853Sbellard         if (n == 0) {
5509fa3e853Sbellard             /* NOTE: tb_end may be after the end of the page, but
5519fa3e853Sbellard                it is not a problem */
5529fa3e853Sbellard             tb_start = tb->pc & ~TARGET_PAGE_MASK;
5539fa3e853Sbellard             tb_end = tb_start + tb->size;
5549fa3e853Sbellard             if (tb_end > TARGET_PAGE_SIZE)
5559fa3e853Sbellard                 tb_end = TARGET_PAGE_SIZE;
5569fa3e853Sbellard         } else {
5579fa3e853Sbellard             tb_start = 0;
5589fa3e853Sbellard             tb_end = ((tb->pc + tb->size) & ~TARGET_PAGE_MASK);
5599fa3e853Sbellard         }
5609fa3e853Sbellard         set_bits(p->code_bitmap, tb_start, tb_end - tb_start);
5619fa3e853Sbellard         tb = tb->page_next[n];
5629fa3e853Sbellard     }
5639fa3e853Sbellard }
5649fa3e853Sbellard 
565d720b93dSbellard #ifdef TARGET_HAS_PRECISE_SMC
566d720b93dSbellard 
/* Translate the code at 'pc' into a fresh TB (precise self-modifying
   code support).  If the TB pool or code buffer is exhausted, the whole
   translation cache is flushed first, after which allocation cannot
   fail.  'flags'/'cs_base'/'cflags' are recorded in the TB for later
   lookup matching. */
static void tb_gen_code(CPUState *env,
                        target_ulong pc, target_ulong cs_base, int flags,
                        int cflags)
{
    TranslationBlock *tb;
    uint8_t *tc_ptr;
    target_ulong phys_pc, phys_page2, virt_page2;
    int code_gen_size;

    phys_pc = get_phys_addr_code(env, pc);
    tb = tb_alloc(pc);
    if (!tb) {
        /* flush must be done */
        tb_flush(env);
        /* cannot fail at this point */
        tb = tb_alloc(pc);
    }
    /* generate host code at the current end of the shared code buffer */
    tc_ptr = code_gen_ptr;
    tb->tc_ptr = tc_ptr;
    tb->cs_base = cs_base;
    tb->flags = flags;
    tb->cflags = cflags;
    cpu_gen_code(env, tb, CODE_GEN_MAX_SIZE, &code_gen_size);
    /* bump the buffer pointer past the generated code, keeping
       CODE_GEN_ALIGN alignment for the next TB */
    code_gen_ptr = (void *)(((unsigned long)code_gen_ptr + code_gen_size + CODE_GEN_ALIGN - 1) & ~(CODE_GEN_ALIGN - 1));

    /* check next page if needed */
    virt_page2 = (pc + tb->size - 1) & TARGET_PAGE_MASK;
    phys_page2 = -1;
    if ((pc & TARGET_PAGE_MASK) != virt_page2) {
        /* the TB spills onto a second page: record that page too */
        phys_page2 = get_phys_addr_code(env, virt_page2);
    }
    tb_link_phys(tb, phys_pc, phys_page2);
}
600d720b93dSbellard #endif
601d720b93dSbellard 
6029fa3e853Sbellard /* invalidate all TBs which intersect with the target physical page
6039fa3e853Sbellard    starting in range [start;end[. NOTE: start and end must refer to
604d720b93dSbellard    the same physical page. 'is_cpu_write_access' should be true if called
605d720b93dSbellard    from a real cpu write access: the virtual CPU will exit the current
606d720b93dSbellard    TB if code is modified inside this TB. */
607d720b93dSbellard void tb_invalidate_phys_page_range(target_ulong start, target_ulong end,
608d720b93dSbellard                                    int is_cpu_write_access)
6099fa3e853Sbellard {
610d720b93dSbellard     int n, current_tb_modified, current_tb_not_found, current_flags;
611d720b93dSbellard     CPUState *env = cpu_single_env;
6129fa3e853Sbellard     PageDesc *p;
613ea1c1802Sbellard     TranslationBlock *tb, *tb_next, *current_tb, *saved_tb;
6149fa3e853Sbellard     target_ulong tb_start, tb_end;
615d720b93dSbellard     target_ulong current_pc, current_cs_base;
6169fa3e853Sbellard 
6179fa3e853Sbellard     p = page_find(start >> TARGET_PAGE_BITS);
6189fa3e853Sbellard     if (!p)
6199fa3e853Sbellard         return;
6209fa3e853Sbellard     if (!p->code_bitmap &&
621d720b93dSbellard         ++p->code_write_count >= SMC_BITMAP_USE_THRESHOLD &&
622d720b93dSbellard         is_cpu_write_access) {
6239fa3e853Sbellard         /* build code bitmap */
6249fa3e853Sbellard         build_page_bitmap(p);
6259fa3e853Sbellard     }
6269fa3e853Sbellard 
6279fa3e853Sbellard     /* we remove all the TBs in the range [start, end[ */
6289fa3e853Sbellard     /* XXX: see if in some cases it could be faster to invalidate all the code */
629d720b93dSbellard     current_tb_not_found = is_cpu_write_access;
630d720b93dSbellard     current_tb_modified = 0;
631d720b93dSbellard     current_tb = NULL; /* avoid warning */
632d720b93dSbellard     current_pc = 0; /* avoid warning */
633d720b93dSbellard     current_cs_base = 0; /* avoid warning */
634d720b93dSbellard     current_flags = 0; /* avoid warning */
6359fa3e853Sbellard     tb = p->first_tb;
6369fa3e853Sbellard     while (tb != NULL) {
6379fa3e853Sbellard         n = (long)tb & 3;
6389fa3e853Sbellard         tb = (TranslationBlock *)((long)tb & ~3);
6399fa3e853Sbellard         tb_next = tb->page_next[n];
6409fa3e853Sbellard         /* NOTE: this is subtle as a TB may span two physical pages */
6419fa3e853Sbellard         if (n == 0) {
6429fa3e853Sbellard             /* NOTE: tb_end may be after the end of the page, but
6439fa3e853Sbellard                it is not a problem */
6449fa3e853Sbellard             tb_start = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
6459fa3e853Sbellard             tb_end = tb_start + tb->size;
6469fa3e853Sbellard         } else {
6479fa3e853Sbellard             tb_start = tb->page_addr[1];
6489fa3e853Sbellard             tb_end = tb_start + ((tb->pc + tb->size) & ~TARGET_PAGE_MASK);
6499fa3e853Sbellard         }
6509fa3e853Sbellard         if (!(tb_end <= start || tb_start >= end)) {
651d720b93dSbellard #ifdef TARGET_HAS_PRECISE_SMC
652d720b93dSbellard             if (current_tb_not_found) {
653d720b93dSbellard                 current_tb_not_found = 0;
654d720b93dSbellard                 current_tb = NULL;
655d720b93dSbellard                 if (env->mem_write_pc) {
656d720b93dSbellard                     /* now we have a real cpu fault */
657d720b93dSbellard                     current_tb = tb_find_pc(env->mem_write_pc);
658d720b93dSbellard                 }
659d720b93dSbellard             }
660d720b93dSbellard             if (current_tb == tb &&
661d720b93dSbellard                 !(current_tb->cflags & CF_SINGLE_INSN)) {
662d720b93dSbellard                 /* If we are modifying the current TB, we must stop
663d720b93dSbellard                 its execution. We could be more precise by checking
664d720b93dSbellard                 that the modification is after the current PC, but it
665d720b93dSbellard                 would require a specialized function to partially
666d720b93dSbellard                 restore the CPU state */
667d720b93dSbellard 
668d720b93dSbellard                 current_tb_modified = 1;
669d720b93dSbellard                 cpu_restore_state(current_tb, env,
670d720b93dSbellard                                   env->mem_write_pc, NULL);
671d720b93dSbellard #if defined(TARGET_I386)
672d720b93dSbellard                 current_flags = env->hflags;
673d720b93dSbellard                 current_flags |= (env->eflags & (IOPL_MASK | TF_MASK | VM_MASK));
674d720b93dSbellard                 current_cs_base = (target_ulong)env->segs[R_CS].base;
675d720b93dSbellard                 current_pc = current_cs_base + env->eip;
676d720b93dSbellard #else
677d720b93dSbellard #error unsupported CPU
678d720b93dSbellard #endif
679d720b93dSbellard             }
680d720b93dSbellard #endif /* TARGET_HAS_PRECISE_SMC */
6816f5a9f7eSbellard             /* we need to do that to handle the case where a signal
6826f5a9f7eSbellard                occurs while doing tb_phys_invalidate() */
6836f5a9f7eSbellard             saved_tb = NULL;
6846f5a9f7eSbellard             if (env) {
685ea1c1802Sbellard                 saved_tb = env->current_tb;
686ea1c1802Sbellard                 env->current_tb = NULL;
6876f5a9f7eSbellard             }
6889fa3e853Sbellard             tb_phys_invalidate(tb, -1);
6896f5a9f7eSbellard             if (env) {
690ea1c1802Sbellard                 env->current_tb = saved_tb;
691ea1c1802Sbellard                 if (env->interrupt_request && env->current_tb)
692ea1c1802Sbellard                     cpu_interrupt(env, env->interrupt_request);
6939fa3e853Sbellard             }
6946f5a9f7eSbellard         }
6959fa3e853Sbellard         tb = tb_next;
6969fa3e853Sbellard     }
6979fa3e853Sbellard #if !defined(CONFIG_USER_ONLY)
6989fa3e853Sbellard     /* if no code remaining, no need to continue to use slow writes */
6999fa3e853Sbellard     if (!p->first_tb) {
7009fa3e853Sbellard         invalidate_page_bitmap(p);
701d720b93dSbellard         if (is_cpu_write_access) {
702d720b93dSbellard             tlb_unprotect_code_phys(env, start, env->mem_write_vaddr);
703d720b93dSbellard         }
704d720b93dSbellard     }
705d720b93dSbellard #endif
706d720b93dSbellard #ifdef TARGET_HAS_PRECISE_SMC
707d720b93dSbellard     if (current_tb_modified) {
708d720b93dSbellard         /* we generate a block containing just the instruction
709d720b93dSbellard            modifying the memory. It will ensure that it cannot modify
710d720b93dSbellard            itself */
711ea1c1802Sbellard         env->current_tb = NULL;
712d720b93dSbellard         tb_gen_code(env, current_pc, current_cs_base, current_flags,
713d720b93dSbellard                     CF_SINGLE_INSN);
714d720b93dSbellard         cpu_resume_from_signal(env, NULL);
7159fa3e853Sbellard     }
7169fa3e853Sbellard #endif
7179fa3e853Sbellard }
7189fa3e853Sbellard 
/* Fast-path invalidation for a small memory write of 'len' bytes at
   physical address 'start'.  len must be <= 8 and start must be a
   multiple of len.  When the page has a code bitmap, the expensive
   range invalidation is skipped unless one of the written bytes
   actually overlaps translated code. */
static inline void tb_invalidate_phys_page_fast(target_ulong start, int len)
{
    PageDesc *p;
    int offset, b;
#if 0
    if (1) {
        if (loglevel) {
            fprintf(logfile, "modifying code at 0x%x size=%d EIP=%x PC=%08x\n",
                   cpu_single_env->mem_write_vaddr, len,
                   cpu_single_env->eip,
                   cpu_single_env->eip + (long)cpu_single_env->segs[R_CS].base);
        }
    }
#endif
    p = page_find(start >> TARGET_PAGE_BITS);
    if (!p)
        return; /* no page descriptor: no translated code here */
    if (p->code_bitmap) {
        /* the bitmap holds one bit per page byte (offset>>3 selects the
           byte, offset&7 the bit): test whether any of the 'len' written
           bytes is marked as code */
        offset = start & ~TARGET_PAGE_MASK;
        b = p->code_bitmap[offset >> 3] >> (offset & 7);
        if (b & ((1 << len) - 1))
            goto do_invalidate;
    } else {
    do_invalidate:
        tb_invalidate_phys_page_range(start, start + len, 1);
    }
}
7479fa3e853Sbellard 
#if !defined(CONFIG_SOFTMMU)
/* Invalidate every TB on the target page containing 'addr'.  'pc' is a
   position inside the translated-code buffer at the time of the write
   (0 if unknown) and 'puc' is the signal context; both are used only
   for precise self-modifying-code (SMC) recovery. */
static void tb_invalidate_phys_page(target_ulong addr,
                                    unsigned long pc, void *puc)
{
    int n, current_flags, current_tb_modified;
    target_ulong current_pc, current_cs_base;
    PageDesc *p;
    TranslationBlock *tb, *current_tb;
#ifdef TARGET_HAS_PRECISE_SMC
    CPUState *env = cpu_single_env;
#endif

    addr &= TARGET_PAGE_MASK;
    p = page_find(addr >> TARGET_PAGE_BITS);
    if (!p)
        return;
    tb = p->first_tb;
    current_tb_modified = 0;
    current_tb = NULL;
    current_pc = 0; /* avoid warning */
    current_cs_base = 0; /* avoid warning */
    current_flags = 0; /* avoid warning */
#ifdef TARGET_HAS_PRECISE_SMC
    if (tb && pc != 0) {
        /* locate the TB whose generated code contains 'pc' */
        current_tb = tb_find_pc(pc);
    }
#endif
    while (tb != NULL) {
        /* the low 2 bits of each list pointer select which of the TB's
           two page slots this list entry belongs to */
        n = (long)tb & 3;
        tb = (TranslationBlock *)((long)tb & ~3);
#ifdef TARGET_HAS_PRECISE_SMC
        if (current_tb == tb &&
            !(current_tb->cflags & CF_SINGLE_INSN)) {
                /* If we are modifying the current TB, we must stop
                   its execution. We could be more precise by checking
                   that the modification is after the current PC, but it
                   would require a specialized function to partially
                   restore the CPU state */

            current_tb_modified = 1;
            cpu_restore_state(current_tb, env, pc, puc);
#if defined(TARGET_I386)
            /* capture the state needed to retranslate the faulting insn */
            current_flags = env->hflags;
            current_flags |= (env->eflags & (IOPL_MASK | TF_MASK | VM_MASK));
            current_cs_base = (target_ulong)env->segs[R_CS].base;
            current_pc = current_cs_base + env->eip;
#else
#error unsupported CPU
#endif
        }
#endif /* TARGET_HAS_PRECISE_SMC */
        tb_phys_invalidate(tb, addr);
        tb = tb->page_next[n];
    }
    p->first_tb = NULL;
#ifdef TARGET_HAS_PRECISE_SMC
    if (current_tb_modified) {
        /* we generate a block containing just the instruction
           modifying the memory. It will ensure that it cannot modify
           itself */
        env->current_tb = NULL;
        tb_gen_code(env, current_pc, current_cs_base, current_flags,
                    CF_SINGLE_INSN);
        cpu_resume_from_signal(env, puc);
    }
#endif
}
#endif
816fd6ce8f6Sbellard 
/* add the tb in the target page and protect it if necessary */
/* 'n' (0 or 1) selects which of the TB's two page slots is being
   filled; the page's TB list pointers are tagged with 'n' in their
   low bits so the list walker knows which slot to follow next. */
static inline void tb_alloc_page(TranslationBlock *tb,
                                 unsigned int n, target_ulong page_addr)
{
    PageDesc *p;
    TranslationBlock *last_first_tb;

    tb->page_addr[n] = page_addr;
    p = page_find_alloc(page_addr >> TARGET_PAGE_BITS);
    /* head insertion into the page's TB list */
    tb->page_next[n] = p->first_tb;
    last_first_tb = p->first_tb;
    p->first_tb = (TranslationBlock *)((long)tb | n);
    invalidate_page_bitmap(p);

#if defined(TARGET_HAS_SMC) || 1 /* SMC handling is unconditionally enabled */

#if defined(CONFIG_USER_ONLY)
    if (p->flags & PAGE_WRITE) {
        target_ulong addr;
        PageDesc *p2;
        int prot;

        /* force the host page as non writable (writes will have a
           page fault + mprotect overhead) */
        page_addr &= qemu_host_page_mask;
        prot = 0;
        /* a host page may cover several target pages: accumulate the
           protection over all of them and clear PAGE_WRITE on each */
        for(addr = page_addr; addr < page_addr + qemu_host_page_size;
            addr += TARGET_PAGE_SIZE) {

            p2 = page_find (addr >> TARGET_PAGE_BITS);
            if (!p2)
                continue;
            prot |= p2->flags;
            p2->flags &= ~PAGE_WRITE;
            /* NOTE(review): return value unused — presumably called for
               a side effect; verify against page_get_flags() */
            page_get_flags(addr);
          }
        mprotect(g2h(page_addr), qemu_host_page_size,
                 (prot & PAGE_BITS) & ~PAGE_WRITE);
#ifdef DEBUG_TB_INVALIDATE
        printf("protecting code page: 0x%08lx\n",
               page_addr);
#endif
    }
#else
    /* if some code is already present, then the pages are already
       protected. So we handle the case where only the first TB is
       allocated in a physical page */
    if (!last_first_tb) {
        tlb_protect_code(page_addr);
    }
#endif

#endif /* TARGET_HAS_SMC */
}
871fd6ce8f6Sbellard 
872fd6ce8f6Sbellard /* Allocate a new translation block. Flush the translation buffer if
873fd6ce8f6Sbellard    too many translation blocks or too much generated code. */
874c27004ecSbellard TranslationBlock *tb_alloc(target_ulong pc)
875fd6ce8f6Sbellard {
876fd6ce8f6Sbellard     TranslationBlock *tb;
877fd6ce8f6Sbellard 
878fd6ce8f6Sbellard     if (nb_tbs >= CODE_GEN_MAX_BLOCKS ||
879fd6ce8f6Sbellard         (code_gen_ptr - code_gen_buffer) >= CODE_GEN_BUFFER_MAX_SIZE)
880d4e8164fSbellard         return NULL;
881fd6ce8f6Sbellard     tb = &tbs[nb_tbs++];
882fd6ce8f6Sbellard     tb->pc = pc;
883b448f2f3Sbellard     tb->cflags = 0;
884d4e8164fSbellard     return tb;
885d4e8164fSbellard }
886d4e8164fSbellard 
/* add a new TB and link it to the physical page tables. phys_page2 is
   (-1) to indicate that only one page contains the TB. */
void tb_link_phys(TranslationBlock *tb,
                  target_ulong phys_pc, target_ulong phys_page2)
{
    unsigned int h;
    TranslationBlock **ptb;

    /* add in the physical hash table (head insertion) */
    h = tb_phys_hash_func(phys_pc);
    ptb = &tb_phys_hash[h];
    tb->phys_hash_next = *ptb;
    *ptb = tb;

    /* add in the page list */
    tb_alloc_page(tb, 0, phys_pc & TARGET_PAGE_MASK);
    if (phys_page2 != -1)
        tb_alloc_page(tb, 1, phys_page2);
    else
        tb->page_addr[1] = -1;

    /* tag value 2 marks the head of the incoming-jump list */
    tb->jmp_first = (TranslationBlock *)((long)tb | 2);
    tb->jmp_next[0] = NULL;
    tb->jmp_next[1] = NULL;
#ifdef USE_CODE_COPY
    /* recompute CF_FP_USED from CF_TB_FP_USED */
    tb->cflags &= ~CF_FP_USED;
    if (tb->cflags & CF_TB_FP_USED)
        tb->cflags |= CF_FP_USED;
#endif

    /* init original jump addresses */
    if (tb->tb_next_offset[0] != 0xffff)
        tb_reset_jump(tb, 0);
    if (tb->tb_next_offset[1] != 0xffff)
        tb_reset_jump(tb, 1);

#ifdef DEBUG_TB_CHECK
    tb_page_check();
#endif
}
927fd6ce8f6Sbellard 
/* find the TB 'tb' such that tb[0].tc_ptr <= tc_ptr <
   tb[1].tc_ptr. Return NULL if not found */
TranslationBlock *tb_find_pc(unsigned long tc_ptr)
{
    int m_min, m_max, m;
    unsigned long v;
    TranslationBlock *tb;

    if (nb_tbs <= 0)
        return NULL;
    /* reject pointers outside the generated-code buffer */
    if (tc_ptr < (unsigned long)code_gen_buffer ||
        tc_ptr >= (unsigned long)code_gen_ptr)
        return NULL;
    /* binary search (cf Knuth); assumes tbs[] is ordered by tc_ptr —
       presumably because code is generated sequentially (TODO confirm) */
    m_min = 0;
    m_max = nb_tbs - 1;
    while (m_min <= m_max) {
        m = (m_min + m_max) >> 1;
        tb = &tbs[m];
        v = (unsigned long)tb->tc_ptr;
        if (v == tc_ptr)
            return tb;
        else if (tc_ptr < v) {
            m_max = m - 1;
        } else {
            m_min = m + 1;
        }
    }
    /* no exact match: m_max indexes the last TB starting before tc_ptr */
    return &tbs[m_max];
}
9587501267eSbellard 
959ea041c0eSbellard static void tb_reset_jump_recursive(TranslationBlock *tb);
960ea041c0eSbellard 
/* Unlink the n-th outgoing chained jump of 'tb'.  Jump-list pointers
   are tagged in their low 2 bits: 0/1 select a jmp_next slot in the
   pointed-to TB, while 2 marks the list head (the destination TB
   itself, as set in tb_link_phys). */
static inline void tb_reset_jump_recursive2(TranslationBlock *tb, int n)
{
    TranslationBlock *tb1, *tb_next, **ptb;
    unsigned int n1;

    tb1 = tb->jmp_next[n];
    if (tb1 != NULL) {
        /* find head of list */
        for(;;) {
            n1 = (long)tb1 & 3;
            tb1 = (TranslationBlock *)((long)tb1 & ~3);
            if (n1 == 2)
                break;
            tb1 = tb1->jmp_next[n1];
        }
        /* we are now sure now that tb jumps to tb1 */
        tb_next = tb1;

        /* remove tb from the jmp_first list */
        ptb = &tb_next->jmp_first;
        for(;;) {
            tb1 = *ptb;
            n1 = (long)tb1 & 3;
            tb1 = (TranslationBlock *)((long)tb1 & ~3);
            if (n1 == n && tb1 == tb)
                break;
            ptb = &tb1->jmp_next[n1];
        }
        /* splice our entry out of the list */
        *ptb = tb->jmp_next[n];
        tb->jmp_next[n] = NULL;

        /* suppress the jump to next tb in generated code */
        tb_reset_jump(tb, n);

        /* suppress jumps in the tb on which we could have jumped */
        tb_reset_jump_recursive(tb_next);
    }
}
999ea041c0eSbellard 
1000ea041c0eSbellard static void tb_reset_jump_recursive(TranslationBlock *tb)
1001ea041c0eSbellard {
1002ea041c0eSbellard     tb_reset_jump_recursive2(tb, 0);
1003ea041c0eSbellard     tb_reset_jump_recursive2(tb, 1);
1004ea041c0eSbellard }
1005ea041c0eSbellard 
10061fddef4bSbellard #if defined(TARGET_HAS_ICE)
1007d720b93dSbellard static void breakpoint_invalidate(CPUState *env, target_ulong pc)
1008d720b93dSbellard {
1009c2f07f81Spbrook     target_ulong addr, pd;
1010c2f07f81Spbrook     ram_addr_t ram_addr;
1011c2f07f81Spbrook     PhysPageDesc *p;
1012d720b93dSbellard 
1013c2f07f81Spbrook     addr = cpu_get_phys_page_debug(env, pc);
1014c2f07f81Spbrook     p = phys_page_find(addr >> TARGET_PAGE_BITS);
1015c2f07f81Spbrook     if (!p) {
1016c2f07f81Spbrook         pd = IO_MEM_UNASSIGNED;
1017c2f07f81Spbrook     } else {
1018c2f07f81Spbrook         pd = p->phys_offset;
1019c2f07f81Spbrook     }
1020c2f07f81Spbrook     ram_addr = (pd & TARGET_PAGE_MASK) | (pc & ~TARGET_PAGE_MASK);
1021706cd4b5Spbrook     tb_invalidate_phys_page_range(ram_addr, ram_addr + 1, 0);
1022d720b93dSbellard }
1023c27004ecSbellard #endif
1024d720b93dSbellard 
1025c33a346eSbellard /* add a breakpoint. EXCP_DEBUG is returned by the CPU loop if a
1026c33a346eSbellard    breakpoint is reached */
10272e12669aSbellard int cpu_breakpoint_insert(CPUState *env, target_ulong pc)
10284c3a88a2Sbellard {
10291fddef4bSbellard #if defined(TARGET_HAS_ICE)
10304c3a88a2Sbellard     int i;
10314c3a88a2Sbellard 
10324c3a88a2Sbellard     for(i = 0; i < env->nb_breakpoints; i++) {
10334c3a88a2Sbellard         if (env->breakpoints[i] == pc)
10344c3a88a2Sbellard             return 0;
10354c3a88a2Sbellard     }
10364c3a88a2Sbellard 
10374c3a88a2Sbellard     if (env->nb_breakpoints >= MAX_BREAKPOINTS)
10384c3a88a2Sbellard         return -1;
10394c3a88a2Sbellard     env->breakpoints[env->nb_breakpoints++] = pc;
1040d720b93dSbellard 
1041d720b93dSbellard     breakpoint_invalidate(env, pc);
10424c3a88a2Sbellard     return 0;
10434c3a88a2Sbellard #else
10444c3a88a2Sbellard     return -1;
10454c3a88a2Sbellard #endif
10464c3a88a2Sbellard }
10474c3a88a2Sbellard 
10484c3a88a2Sbellard /* remove a breakpoint */
10492e12669aSbellard int cpu_breakpoint_remove(CPUState *env, target_ulong pc)
10504c3a88a2Sbellard {
10511fddef4bSbellard #if defined(TARGET_HAS_ICE)
10524c3a88a2Sbellard     int i;
10534c3a88a2Sbellard     for(i = 0; i < env->nb_breakpoints; i++) {
10544c3a88a2Sbellard         if (env->breakpoints[i] == pc)
10554c3a88a2Sbellard             goto found;
10564c3a88a2Sbellard     }
10574c3a88a2Sbellard     return -1;
10584c3a88a2Sbellard  found:
10594c3a88a2Sbellard     env->nb_breakpoints--;
10601fddef4bSbellard     if (i < env->nb_breakpoints)
10611fddef4bSbellard       env->breakpoints[i] = env->breakpoints[env->nb_breakpoints];
1062d720b93dSbellard 
1063d720b93dSbellard     breakpoint_invalidate(env, pc);
10644c3a88a2Sbellard     return 0;
10654c3a88a2Sbellard #else
10664c3a88a2Sbellard     return -1;
10674c3a88a2Sbellard #endif
10684c3a88a2Sbellard }
10694c3a88a2Sbellard 
1070c33a346eSbellard /* enable or disable single step mode. EXCP_DEBUG is returned by the
1071c33a346eSbellard    CPU loop after each instruction */
1072c33a346eSbellard void cpu_single_step(CPUState *env, int enabled)
1073c33a346eSbellard {
10741fddef4bSbellard #if defined(TARGET_HAS_ICE)
1075c33a346eSbellard     if (env->singlestep_enabled != enabled) {
1076c33a346eSbellard         env->singlestep_enabled = enabled;
1077c33a346eSbellard         /* must flush all the translated code to avoid inconsistancies */
10789fa3e853Sbellard         /* XXX: only flush what is necessary */
10790124311eSbellard         tb_flush(env);
1080c33a346eSbellard     }
1081c33a346eSbellard #endif
1082c33a346eSbellard }
1083c33a346eSbellard 
/* enable or disable low levels log */
/* Sets the global log mask; lazily opens the log file the first time
   any logging is enabled.  Exits the process if the file cannot be
   opened. */
void cpu_set_log(int log_flags)
{
    loglevel = log_flags;
    if (loglevel && !logfile) {
        logfile = fopen(logfilename, "w");
        if (!logfile) {
            perror(logfilename);
            /* _exit() skips stdio/atexit cleanup */
            _exit(1);
        }
#if !defined(CONFIG_SOFTMMU)
        /* must avoid mmap() usage of glibc by setting a buffer "by hand" */
        {
            static uint8_t logfile_buf[4096];
            setvbuf(logfile, logfile_buf, _IOLBF, sizeof(logfile_buf));
        }
#else
        /* line buffered so logs survive a crash */
        setvbuf(logfile, NULL, _IOLBF, 0);
#endif
    }
}
110534865134Sbellard 
110634865134Sbellard void cpu_set_log_filename(const char *filename)
110734865134Sbellard {
110834865134Sbellard     logfilename = strdup(filename);
110934865134Sbellard }
1110c33a346eSbellard 
/* mask must never be zero, except for A20 change call */
/* Raise the interrupt bits in 'mask' and, if the CPU is currently
   executing translated code, break the TB chaining so the execution
   loop notices the request. */
void cpu_interrupt(CPUState *env, int mask)
{
    TranslationBlock *tb;
    static int interrupt_lock;  /* busy flag guarding the unlink below */

    env->interrupt_request |= mask;
    /* if the cpu is currently executing code, we must unlink it and
       all the potentially executing TB */
    tb = env->current_tb;
    if (tb && !testandset(&interrupt_lock)) {
        env->current_tb = NULL;
        tb_reset_jump_recursive(tb);
        interrupt_lock = 0;
    }
}
1127ea041c0eSbellard 
1128b54ad049Sbellard void cpu_reset_interrupt(CPUState *env, int mask)
1129b54ad049Sbellard {
1130b54ad049Sbellard     env->interrupt_request &= ~mask;
1131b54ad049Sbellard }
1132b54ad049Sbellard 
1133f193c797Sbellard CPULogItem cpu_log_items[] = {
1134f193c797Sbellard     { CPU_LOG_TB_OUT_ASM, "out_asm",
1135f193c797Sbellard       "show generated host assembly code for each compiled TB" },
1136f193c797Sbellard     { CPU_LOG_TB_IN_ASM, "in_asm",
1137f193c797Sbellard       "show target assembly code for each compiled TB" },
1138f193c797Sbellard     { CPU_LOG_TB_OP, "op",
1139f193c797Sbellard       "show micro ops for each compiled TB (only usable if 'in_asm' used)" },
1140f193c797Sbellard #ifdef TARGET_I386
1141f193c797Sbellard     { CPU_LOG_TB_OP_OPT, "op_opt",
1142f193c797Sbellard       "show micro ops after optimization for each compiled TB" },
1143f193c797Sbellard #endif
1144f193c797Sbellard     { CPU_LOG_INT, "int",
1145f193c797Sbellard       "show interrupts/exceptions in short format" },
1146f193c797Sbellard     { CPU_LOG_EXEC, "exec",
1147f193c797Sbellard       "show trace before each executed TB (lots of logs)" },
11489fddaa0cSbellard     { CPU_LOG_TB_CPU, "cpu",
11499fddaa0cSbellard       "show CPU state before bloc translation" },
1150f193c797Sbellard #ifdef TARGET_I386
1151f193c797Sbellard     { CPU_LOG_PCALL, "pcall",
1152f193c797Sbellard       "show protected mode far calls/returns/exceptions" },
1153f193c797Sbellard #endif
11548e3a9fd2Sbellard #ifdef DEBUG_IOPORT
1155fd872598Sbellard     { CPU_LOG_IOPORT, "ioport",
1156fd872598Sbellard       "show all i/o ports accesses" },
11578e3a9fd2Sbellard #endif
1158f193c797Sbellard     { 0, NULL, NULL },
1159f193c797Sbellard };
1160f193c797Sbellard 
/* Return nonzero iff 's2' has length 'n' and matches the first 'n'
   bytes of 's1' (i.e. s2 equals the unterminated n-byte token at s1). */
static int cmp1(const char *s1, int n, const char *s2)
{
    return strlen(s2) == (size_t)n && memcmp(s1, s2, n) == 0;
}
1167f193c797Sbellard 
1168f193c797Sbellard /* takes a comma separated list of log masks. Return 0 if error. */
1169f193c797Sbellard int cpu_str_to_log_mask(const char *str)
1170f193c797Sbellard {
1171f193c797Sbellard     CPULogItem *item;
1172f193c797Sbellard     int mask;
1173f193c797Sbellard     const char *p, *p1;
1174f193c797Sbellard 
1175f193c797Sbellard     p = str;
1176f193c797Sbellard     mask = 0;
1177f193c797Sbellard     for(;;) {
1178f193c797Sbellard         p1 = strchr(p, ',');
1179f193c797Sbellard         if (!p1)
1180f193c797Sbellard             p1 = p + strlen(p);
11818e3a9fd2Sbellard 	if(cmp1(p,p1-p,"all")) {
11828e3a9fd2Sbellard 		for(item = cpu_log_items; item->mask != 0; item++) {
11838e3a9fd2Sbellard 			mask |= item->mask;
11848e3a9fd2Sbellard 		}
11858e3a9fd2Sbellard 	} else {
1186f193c797Sbellard         for(item = cpu_log_items; item->mask != 0; item++) {
1187f193c797Sbellard             if (cmp1(p, p1 - p, item->name))
1188f193c797Sbellard                 goto found;
1189f193c797Sbellard         }
1190f193c797Sbellard         return 0;
11918e3a9fd2Sbellard 	}
1192f193c797Sbellard     found:
1193f193c797Sbellard         mask |= item->mask;
1194f193c797Sbellard         if (*p1 != ',')
1195f193c797Sbellard             break;
1196f193c797Sbellard         p = p1 + 1;
1197f193c797Sbellard     }
1198f193c797Sbellard     return mask;
1199f193c797Sbellard }
1200ea041c0eSbellard 
/* Print a fatal error message to stderr, dump the CPU state and
   abort().  Never returns. */
void cpu_abort(CPUState *env, const char *fmt, ...)
{
    va_list ap;

    va_start(ap, fmt);
    fprintf(stderr, "qemu: fatal: ");
    vfprintf(stderr, fmt, ap);
    fprintf(stderr, "\n");
#ifdef TARGET_I386
    /* include FPU and condition-code details on x86 */
    cpu_dump_state(env, stderr, fprintf, X86_DUMP_FPU | X86_DUMP_CCOP);
#else
    cpu_dump_state(env, stderr, fprintf, 0);
#endif
    va_end(ap);
    abort();
}
12177501267eSbellard 
12180124311eSbellard #if !defined(CONFIG_USER_ONLY)
12190124311eSbellard 
1220ee8b7021Sbellard /* NOTE: if flush_global is true, also flush global entries (not
1221ee8b7021Sbellard    implemented yet) */
1222ee8b7021Sbellard void tlb_flush(CPUState *env, int flush_global)
122333417e70Sbellard {
122433417e70Sbellard     int i;
12250124311eSbellard 
12269fa3e853Sbellard #if defined(DEBUG_TLB)
12279fa3e853Sbellard     printf("tlb_flush:\n");
12289fa3e853Sbellard #endif
12290124311eSbellard     /* must reset current TB so that interrupts cannot modify the
12300124311eSbellard        links while we are modifying them */
12310124311eSbellard     env->current_tb = NULL;
12320124311eSbellard 
123333417e70Sbellard     for(i = 0; i < CPU_TLB_SIZE; i++) {
123484b7b8e7Sbellard         env->tlb_table[0][i].addr_read = -1;
123584b7b8e7Sbellard         env->tlb_table[0][i].addr_write = -1;
123684b7b8e7Sbellard         env->tlb_table[0][i].addr_code = -1;
123784b7b8e7Sbellard         env->tlb_table[1][i].addr_read = -1;
123884b7b8e7Sbellard         env->tlb_table[1][i].addr_write = -1;
123984b7b8e7Sbellard         env->tlb_table[1][i].addr_code = -1;
124033417e70Sbellard     }
12419fa3e853Sbellard 
12428a40a180Sbellard     memset (env->tb_jmp_cache, 0, TB_JMP_CACHE_SIZE * sizeof (void *));
12439fa3e853Sbellard 
12449fa3e853Sbellard #if !defined(CONFIG_SOFTMMU)
12459fa3e853Sbellard     munmap((void *)MMAP_AREA_START, MMAP_AREA_END - MMAP_AREA_START);
12469fa3e853Sbellard #endif
12470a962c02Sbellard #ifdef USE_KQEMU
12480a962c02Sbellard     if (env->kqemu_enabled) {
12490a962c02Sbellard         kqemu_flush(env, flush_global);
12500a962c02Sbellard     }
12510a962c02Sbellard #endif
1252e3db7226Sbellard     tlb_flush_count++;
125333417e70Sbellard }
125433417e70Sbellard 
1255274da6b2Sbellard static inline void tlb_flush_entry(CPUTLBEntry *tlb_entry, target_ulong addr)
125661382a50Sbellard {
125784b7b8e7Sbellard     if (addr == (tlb_entry->addr_read &
125884b7b8e7Sbellard                  (TARGET_PAGE_MASK | TLB_INVALID_MASK)) ||
125984b7b8e7Sbellard         addr == (tlb_entry->addr_write &
126084b7b8e7Sbellard                  (TARGET_PAGE_MASK | TLB_INVALID_MASK)) ||
126184b7b8e7Sbellard         addr == (tlb_entry->addr_code &
126284b7b8e7Sbellard                  (TARGET_PAGE_MASK | TLB_INVALID_MASK))) {
126384b7b8e7Sbellard         tlb_entry->addr_read = -1;
126484b7b8e7Sbellard         tlb_entry->addr_write = -1;
126584b7b8e7Sbellard         tlb_entry->addr_code = -1;
126684b7b8e7Sbellard     }
126761382a50Sbellard }
126861382a50Sbellard 
/* Flush the TLB entries and cached TB links that may map the virtual
   page containing 'addr'. */
void tlb_flush_page(CPUState *env, target_ulong addr)
{
    int i;
    TranslationBlock *tb;

#if defined(DEBUG_TLB)
    printf("tlb_flush_page: " TARGET_FMT_lx "\n", addr);
#endif
    /* must reset current TB so that interrupts cannot modify the
       links while we are modifying them */
    env->current_tb = NULL;

    addr &= TARGET_PAGE_MASK;
    i = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
    /* either TLB table may hold a mapping for this page */
    tlb_flush_entry(&env->tlb_table[0][i], addr);
    tlb_flush_entry(&env->tlb_table[1][i], addr);

    /* drop cached TBs whose code starts or ends on this page */
    for(i = 0; i < TB_JMP_CACHE_SIZE; i++) {
        tb = env->tb_jmp_cache[i];
        if (tb &&
            ((tb->pc & TARGET_PAGE_MASK) == addr ||
             ((tb->pc + tb->size - 1) & TARGET_PAGE_MASK) == addr)) {
            env->tb_jmp_cache[i] = NULL;
        }
    }

#if !defined(CONFIG_SOFTMMU)
    if (addr < MMAP_AREA_END)
        munmap((void *)addr, TARGET_PAGE_SIZE);
#endif
#ifdef USE_KQEMU
    if (env->kqemu_enabled) {
        kqemu_flush_page(env, addr);
    }
#endif
}
13059fa3e853Sbellard 
13069fa3e853Sbellard /* update the TLBs so that writes to code in the virtual page 'addr'
13079fa3e853Sbellard    can be detected */
13086a00d601Sbellard static void tlb_protect_code(ram_addr_t ram_addr)
130961382a50Sbellard {
13106a00d601Sbellard     cpu_physical_memory_reset_dirty(ram_addr,
13116a00d601Sbellard                                     ram_addr + TARGET_PAGE_SIZE,
13126a00d601Sbellard                                     CODE_DIRTY_FLAG);
13139fa3e853Sbellard }
13149fa3e853Sbellard 
13159fa3e853Sbellard /* update the TLB so that writes in physical page 'phys_addr' are no longer
13163a7d929eSbellard    tested for self modifying code */
13173a7d929eSbellard static void tlb_unprotect_code_phys(CPUState *env, ram_addr_t ram_addr,
13183a7d929eSbellard                                     target_ulong vaddr)
13199fa3e853Sbellard {
13203a7d929eSbellard     phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] |= CODE_DIRTY_FLAG;
13219fa3e853Sbellard }
13229fa3e853Sbellard 
13231ccde1cbSbellard static inline void tlb_reset_dirty_range(CPUTLBEntry *tlb_entry,
13241ccde1cbSbellard                                          unsigned long start, unsigned long length)
13251ccde1cbSbellard {
13261ccde1cbSbellard     unsigned long addr;
132784b7b8e7Sbellard     if ((tlb_entry->addr_write & ~TARGET_PAGE_MASK) == IO_MEM_RAM) {
132884b7b8e7Sbellard         addr = (tlb_entry->addr_write & TARGET_PAGE_MASK) + tlb_entry->addend;
13291ccde1cbSbellard         if ((addr - start) < length) {
133084b7b8e7Sbellard             tlb_entry->addr_write = (tlb_entry->addr_write & TARGET_PAGE_MASK) | IO_MEM_NOTDIRTY;
13311ccde1cbSbellard         }
13321ccde1cbSbellard     }
13331ccde1cbSbellard }
13341ccde1cbSbellard 
/* Clear the given dirty bits for the physical range [start, end) and
   re-arm write traps so the bits are set again on the next store.
   'dirty_flags' selects which bits (e.g. CODE_DIRTY_FLAG) to clear. */
void cpu_physical_memory_reset_dirty(ram_addr_t start, ram_addr_t end,
                                     int dirty_flags)
{
    CPUState *env;
    unsigned long length, start1;
    int i, mask, len;
    uint8_t *p;

    /* round the range out to whole target pages */
    start &= TARGET_PAGE_MASK;
    end = TARGET_PAGE_ALIGN(end);

    length = end - start;
    if (length == 0)
        return;
    len = length >> TARGET_PAGE_BITS;
#ifdef USE_KQEMU
    /* XXX: should not depend on cpu context */
    env = first_cpu;
    if (env->kqemu_enabled) {
        /* tell the kqemu accelerator to trap writes to these pages too */
        ram_addr_t addr;
        addr = start;
        for(i = 0; i < len; i++) {
            kqemu_set_notdirty(env, addr);
            addr += TARGET_PAGE_SIZE;
        }
    }
#endif
    /* clear the requested dirty bits in the per-page bitmap */
    mask = ~dirty_flags;
    p = phys_ram_dirty + (start >> TARGET_PAGE_BITS);
    for(i = 0; i < len; i++)
        p[i] &= mask;

    /* we modify the TLB cache so that the dirty bit will be set again
       when accessing the range */
    start1 = start + (unsigned long)phys_ram_base;
    for(env = first_cpu; env != NULL; env = env->next_cpu) {
        for(i = 0; i < CPU_TLB_SIZE; i++)
            tlb_reset_dirty_range(&env->tlb_table[0][i], start1, length);
        for(i = 0; i < CPU_TLB_SIZE; i++)
            tlb_reset_dirty_range(&env->tlb_table[1][i], start1, length);
    }

#if !defined(CONFIG_SOFTMMU)
    /* XXX: this is expensive */
    /* non-softmmu build: walk every mapped virtual page and strip
       PROT_WRITE from those backed by the affected physical range */
    {
        VirtPageDesc *p;
        int j;
        target_ulong addr;

        for(i = 0; i < L1_SIZE; i++) {
            p = l1_virt_map[i];
            if (p) {
                addr = i << (TARGET_PAGE_BITS + L2_BITS);
                for(j = 0; j < L2_SIZE; j++) {
                    if (p->valid_tag == virt_valid_tag &&
                        p->phys_addr >= start && p->phys_addr < end &&
                        (p->prot & PROT_WRITE)) {
                        if (addr < MMAP_AREA_END) {
                            mprotect((void *)addr, TARGET_PAGE_SIZE,
                                     p->prot & ~PROT_WRITE);
                        }
                    }
                    addr += TARGET_PAGE_SIZE;
                    p++;
                }
            }
        }
    }
#endif
}
14051ccde1cbSbellard 
14063a7d929eSbellard static inline void tlb_update_dirty(CPUTLBEntry *tlb_entry)
14073a7d929eSbellard {
14083a7d929eSbellard     ram_addr_t ram_addr;
14093a7d929eSbellard 
141084b7b8e7Sbellard     if ((tlb_entry->addr_write & ~TARGET_PAGE_MASK) == IO_MEM_RAM) {
141184b7b8e7Sbellard         ram_addr = (tlb_entry->addr_write & TARGET_PAGE_MASK) +
14123a7d929eSbellard             tlb_entry->addend - (unsigned long)phys_ram_base;
14133a7d929eSbellard         if (!cpu_physical_memory_is_dirty(ram_addr)) {
141484b7b8e7Sbellard             tlb_entry->addr_write |= IO_MEM_NOTDIRTY;
14153a7d929eSbellard         }
14163a7d929eSbellard     }
14173a7d929eSbellard }
14183a7d929eSbellard 
14193a7d929eSbellard /* update the TLB according to the current state of the dirty bits */
14203a7d929eSbellard void cpu_tlb_update_dirty(CPUState *env)
14213a7d929eSbellard {
14223a7d929eSbellard     int i;
14233a7d929eSbellard     for(i = 0; i < CPU_TLB_SIZE; i++)
142484b7b8e7Sbellard         tlb_update_dirty(&env->tlb_table[0][i]);
14253a7d929eSbellard     for(i = 0; i < CPU_TLB_SIZE; i++)
142684b7b8e7Sbellard         tlb_update_dirty(&env->tlb_table[1][i]);
14273a7d929eSbellard }
14283a7d929eSbellard 
14291ccde1cbSbellard static inline void tlb_set_dirty1(CPUTLBEntry *tlb_entry,
14301ccde1cbSbellard                                   unsigned long start)
14311ccde1cbSbellard {
14321ccde1cbSbellard     unsigned long addr;
143384b7b8e7Sbellard     if ((tlb_entry->addr_write & ~TARGET_PAGE_MASK) == IO_MEM_NOTDIRTY) {
143484b7b8e7Sbellard         addr = (tlb_entry->addr_write & TARGET_PAGE_MASK) + tlb_entry->addend;
14351ccde1cbSbellard         if (addr == start) {
143684b7b8e7Sbellard             tlb_entry->addr_write = (tlb_entry->addr_write & TARGET_PAGE_MASK) | IO_MEM_RAM;
14371ccde1cbSbellard         }
14381ccde1cbSbellard     }
14391ccde1cbSbellard }
14401ccde1cbSbellard 
14411ccde1cbSbellard /* update the TLB corresponding to virtual page vaddr and phys addr
14421ccde1cbSbellard    addr so that it is no longer dirty */
14436a00d601Sbellard static inline void tlb_set_dirty(CPUState *env,
14446a00d601Sbellard                                  unsigned long addr, target_ulong vaddr)
14451ccde1cbSbellard {
14461ccde1cbSbellard     int i;
14471ccde1cbSbellard 
14481ccde1cbSbellard     addr &= TARGET_PAGE_MASK;
14491ccde1cbSbellard     i = (vaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
145084b7b8e7Sbellard     tlb_set_dirty1(&env->tlb_table[0][i], addr);
145184b7b8e7Sbellard     tlb_set_dirty1(&env->tlb_table[1][i], addr);
14521ccde1cbSbellard }
14531ccde1cbSbellard 
145459817ccbSbellard /* add a new TLB entry. At most one entry for a given virtual address
145559817ccbSbellard    is permitted. Return 0 if OK or 2 if the page could not be mapped
145659817ccbSbellard    (can only happen in non SOFTMMU mode for I/O pages or pages
145759817ccbSbellard    conflicting with the host address space). */
int tlb_set_page_exec(CPUState *env, target_ulong vaddr,
                      target_phys_addr_t paddr, int prot,
                      int is_user, int is_softmmu)
{
    PhysPageDesc *p;
    unsigned long pd;
    unsigned int index;
    target_ulong address;
    target_phys_addr_t addend;
    int ret;
    CPUTLBEntry *te;

    /* look up the physical page descriptor to learn what backs paddr */
    p = phys_page_find(paddr >> TARGET_PAGE_BITS);
    if (!p) {
        pd = IO_MEM_UNASSIGNED;
    } else {
        pd = p->phys_offset;
    }
#if defined(DEBUG_TLB)
    printf("tlb_set_page: vaddr=" TARGET_FMT_lx " paddr=0x%08x prot=%x u=%d smmu=%d pd=0x%08lx\n",
           vaddr, (int)paddr, prot, is_user, is_softmmu, pd);
#endif

    ret = 0;
#if !defined(CONFIG_SOFTMMU)
    if (is_softmmu)
#endif
    {
        if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM) {
            /* IO memory case */
            /* the low bits of 'address' carry the I/O handler index */
            address = vaddr | pd;
            addend = paddr;
        } else {
            /* standard memory */
            address = vaddr;
            addend = (unsigned long)phys_ram_base + (pd & TARGET_PAGE_MASK);
        }

        /* direct-mapped TLB: one slot per vaddr index and mode */
        index = (vaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
        addend -= vaddr;
        te = &env->tlb_table[is_user][index];
        te->addend = addend;
        /* -1 marks an access kind as invalid (never matches a vaddr) */
        if (prot & PAGE_READ) {
            te->addr_read = address;
        } else {
            te->addr_read = -1;
        }
        if (prot & PAGE_EXEC) {
            te->addr_code = address;
        } else {
            te->addr_code = -1;
        }
        if (prot & PAGE_WRITE) {
            if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_ROM) {
                /* ROM: access is ignored (same as unassigned) */
                te->addr_write = vaddr | IO_MEM_ROM;
            } else if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_RAM &&
                       !cpu_physical_memory_is_dirty(pd)) {
                /* clean RAM page: writes must trap for dirty tracking */
                te->addr_write = vaddr | IO_MEM_NOTDIRTY;
            } else {
                te->addr_write = address;
            }
        } else {
            te->addr_write = -1;
        }
    }
#if !defined(CONFIG_SOFTMMU)
    else {
        /* non-softmmu path: map guest pages directly with mmap() */
        if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM) {
            /* IO access: no mapping is done as it will be handled by the
               soft MMU */
            if (!(env->hflags & HF_SOFTMMU_MASK))
                ret = 2;
        } else {
            void *map_addr;

            if (vaddr >= MMAP_AREA_END) {
                /* outside the mappable window: fall back (ret = 2) */
                ret = 2;
            } else {
                if (prot & PROT_WRITE) {
                    if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_ROM ||
#if defined(TARGET_HAS_SMC) || 1
                        first_tb ||
#endif
                        ((pd & ~TARGET_PAGE_MASK) == IO_MEM_RAM &&
                         !cpu_physical_memory_is_dirty(pd))) {
                        /* ROM: we do as if code was inside */
                        /* if code is present, we only map as read only and save the
                           original mapping */
                        VirtPageDesc *vp;

                        vp = virt_page_find_alloc(vaddr >> TARGET_PAGE_BITS, 1);
                        vp->phys_addr = pd;
                        vp->prot = prot;
                        vp->valid_tag = virt_valid_tag;
                        prot &= ~PAGE_WRITE;
                    }
                }
                map_addr = mmap((void *)vaddr, TARGET_PAGE_SIZE, prot,
                                MAP_SHARED | MAP_FIXED, phys_ram_fd, (pd & TARGET_PAGE_MASK));
                if (map_addr == MAP_FAILED) {
                    cpu_abort(env, "mmap failed when mapped physical address 0x%08x to virtual address 0x%08x\n",
                              paddr, vaddr);
                }
            }
        }
    }
#endif
    return ret;
}
15689fa3e853Sbellard 
15699fa3e853Sbellard /* called from signal handler: invalidate the code and unprotect the
15709fa3e853Sbellard    page. Return TRUE if the fault was succesfully handled. */
int page_unprotect(target_ulong addr, unsigned long pc, void *puc)
{
#if !defined(CONFIG_SOFTMMU)
    VirtPageDesc *vp;

#if defined(DEBUG_TLB)
    printf("page_unprotect: addr=0x%08x\n", addr);
#endif
    addr &= TARGET_PAGE_MASK;

    /* if it is not mapped, no need to worry here */
    if (addr >= MMAP_AREA_END)
        return 0;
    vp = virt_page_find(addr >> TARGET_PAGE_BITS);
    if (!vp)
        return 0;
    /* NOTE: in this case, validate_tag is _not_ tested as it
       validates only the code TLB */
    if (vp->valid_tag != virt_valid_tag)
        return 0;
    /* the page was never writable: the fault is genuine, not ours */
    if (!(vp->prot & PAGE_WRITE))
        return 0;
#if defined(DEBUG_TLB)
    printf("page_unprotect: addr=0x%08x phys_addr=0x%08x prot=%x\n",
           addr, vp->phys_addr, vp->prot);
#endif
    /* restore the original (writable) protection saved at map time */
    if (mprotect((void *)addr, TARGET_PAGE_SIZE, vp->prot) < 0)
        cpu_abort(cpu_single_env, "error mprotect addr=0x%lx prot=%d\n",
                  (unsigned long)addr, vp->prot);
    /* set the dirty bit */
    phys_ram_dirty[vp->phys_addr >> TARGET_PAGE_BITS] = 0xff;
    /* flush the code inside */
    tb_invalidate_phys_page(vp->phys_addr, pc, puc);
    return 1;
#else
    /* softmmu build: write protection faults are not expected here */
    return 0;
#endif
}
160933417e70Sbellard 
16100124311eSbellard #else
16110124311eSbellard 
/* user-mode emulation (CONFIG_USER_ONLY): there is no TLB, so flushing
   is a no-op */
void tlb_flush(CPUState *env, int flush_global)
{
}
16150124311eSbellard 
/* user-mode emulation: no TLB to flush a page from */
void tlb_flush_page(CPUState *env, target_ulong addr)
{
}
16190124311eSbellard 
/* user-mode emulation: nothing to insert; always report success (0) */
int tlb_set_page_exec(CPUState *env, target_ulong vaddr,
                      target_phys_addr_t paddr, int prot,
                      int is_user, int is_softmmu)
{
    return 0;
}
162633417e70Sbellard 
16279fa3e853Sbellard /* dump memory mappings */
/* dump memory mappings: print each contiguous run of pages sharing the
   same protection flags as one "start-end size prot" line */
void page_dump(FILE *f)
{
    unsigned long start, end;
    int i, j, prot, prot1;
    PageDesc *p;

    fprintf(f, "%-8s %-8s %-8s %s\n",
            "start", "end", "size", "prot");
    /* start == -1 means "not currently inside a mapped run" */
    start = -1;
    end = -1;
    prot = 0;
    /* the extra i == L1_SIZE pass (p == NULL) forces the last run to
       be flushed */
    for(i = 0; i <= L1_SIZE; i++) {
        if (i < L1_SIZE)
            p = l1_map[i];
        else
            p = NULL;
        for(j = 0;j < L2_SIZE; j++) {
            if (!p)
                prot1 = 0;
            else
                prot1 = p[j].flags;
            if (prot1 != prot) {
                /* protection changed: emit the run that just ended */
                end = (i << (32 - L1_BITS)) | (j << TARGET_PAGE_BITS);
                if (start != -1) {
                    fprintf(f, "%08lx-%08lx %08lx %c%c%c\n",
                            start, end, end - start,
                            prot & PAGE_READ ? 'r' : '-',
                            prot & PAGE_WRITE ? 'w' : '-',
                            prot & PAGE_EXEC ? 'x' : '-');
                }
                if (prot1 != 0)
                    start = end;
                else
                    start = -1;
                prot = prot1;
            }
            /* an absent L2 table means a whole unmapped stretch */
            if (!p)
                break;
        }
    }
}
16699fa3e853Sbellard 
167053a5960aSpbrook int page_get_flags(target_ulong address)
16719fa3e853Sbellard {
16729fa3e853Sbellard     PageDesc *p;
16739fa3e853Sbellard 
16749fa3e853Sbellard     p = page_find(address >> TARGET_PAGE_BITS);
16759fa3e853Sbellard     if (!p)
16769fa3e853Sbellard         return 0;
16779fa3e853Sbellard     return p->flags;
16789fa3e853Sbellard }
16799fa3e853Sbellard 
16809fa3e853Sbellard /* modify the flags of a page and invalidate the code if
16819fa3e853Sbellard    necessary. The flag PAGE_WRITE_ORG is positionned automatically
16829fa3e853Sbellard    depending on PAGE_WRITE */
168353a5960aSpbrook void page_set_flags(target_ulong start, target_ulong end, int flags)
16849fa3e853Sbellard {
16859fa3e853Sbellard     PageDesc *p;
168653a5960aSpbrook     target_ulong addr;
16879fa3e853Sbellard 
16889fa3e853Sbellard     start = start & TARGET_PAGE_MASK;
16899fa3e853Sbellard     end = TARGET_PAGE_ALIGN(end);
16909fa3e853Sbellard     if (flags & PAGE_WRITE)
16919fa3e853Sbellard         flags |= PAGE_WRITE_ORG;
16929fa3e853Sbellard     spin_lock(&tb_lock);
16939fa3e853Sbellard     for(addr = start; addr < end; addr += TARGET_PAGE_SIZE) {
16949fa3e853Sbellard         p = page_find_alloc(addr >> TARGET_PAGE_BITS);
16959fa3e853Sbellard         /* if the write protection is set, then we invalidate the code
16969fa3e853Sbellard            inside */
16979fa3e853Sbellard         if (!(p->flags & PAGE_WRITE) &&
16989fa3e853Sbellard             (flags & PAGE_WRITE) &&
16999fa3e853Sbellard             p->first_tb) {
1700d720b93dSbellard             tb_invalidate_phys_page(addr, 0, NULL);
17019fa3e853Sbellard         }
17029fa3e853Sbellard         p->flags = flags;
17039fa3e853Sbellard     }
17049fa3e853Sbellard     spin_unlock(&tb_lock);
17059fa3e853Sbellard }
17069fa3e853Sbellard 
17079fa3e853Sbellard /* called from signal handler: invalidate the code and unprotect the
17089fa3e853Sbellard    page. Return TRUE if the fault was succesfully handled. */
int page_unprotect(target_ulong address, unsigned long pc, void *puc)
{
    unsigned int page_index, prot, pindex;
    PageDesc *p, *p1;
    target_ulong host_start, host_end, addr;

    /* mprotect() works at host page granularity, which may cover
       several target pages */
    host_start = address & qemu_host_page_mask;
    page_index = host_start >> TARGET_PAGE_BITS;
    p1 = page_find(page_index);
    if (!p1)
        return 0;
    host_end = host_start + qemu_host_page_size;
    p = p1;
    prot = 0;
    /* accumulate the flags of every target page in the host page */
    for(addr = host_start;addr < host_end; addr += TARGET_PAGE_SIZE) {
        prot |= p->flags;
        p++;
    }
    /* if the page was really writable, then we change its
       protection back to writable */
    if (prot & PAGE_WRITE_ORG) {
        pindex = (address - host_start) >> TARGET_PAGE_BITS;
        if (!(p1[pindex].flags & PAGE_WRITE)) {
            mprotect((void *)g2h(host_start), qemu_host_page_size,
                     (prot & PAGE_BITS) | PAGE_WRITE);
            p1[pindex].flags |= PAGE_WRITE;
            /* and since the content will be modified, we must invalidate
               the corresponding translated code. */
            tb_invalidate_phys_page(address, pc, puc);
#ifdef DEBUG_TB_CHECK
            tb_invalidate_check(address);
#endif
            return 1;
        }
    }
    /* fault not caused by our write protection: let the caller handle it */
    return 0;
}
17469fa3e853Sbellard 
17479fa3e853Sbellard /* call this function when system calls directly modify a memory area */
174853a5960aSpbrook /* ??? This should be redundant now we have lock_user.  */
174953a5960aSpbrook void page_unprotect_range(target_ulong data, target_ulong data_size)
17509fa3e853Sbellard {
175153a5960aSpbrook     target_ulong start, end, addr;
17529fa3e853Sbellard 
175353a5960aSpbrook     start = data;
17549fa3e853Sbellard     end = start + data_size;
17559fa3e853Sbellard     start &= TARGET_PAGE_MASK;
17569fa3e853Sbellard     end = TARGET_PAGE_ALIGN(end);
17579fa3e853Sbellard     for(addr = start; addr < end; addr += TARGET_PAGE_SIZE) {
1758d720b93dSbellard         page_unprotect(addr, 0, NULL);
17599fa3e853Sbellard     }
17609fa3e853Sbellard }
17619fa3e853Sbellard 
/* user-mode emulation: no TLB, so dirty tracking is a no-op */
static inline void tlb_set_dirty(CPUState *env,
                                 unsigned long addr, target_ulong vaddr)
{
}
17669fa3e853Sbellard #endif /* defined(CONFIG_USER_ONLY) */
176733417e70Sbellard 
176833417e70Sbellard /* register physical memory. 'size' must be a multiple of the target
176933417e70Sbellard    page size. If (phys_offset & ~TARGET_PAGE_MASK) != 0, then it is an
177033417e70Sbellard    io memory page */
17712e12669aSbellard void cpu_register_physical_memory(target_phys_addr_t start_addr,
17722e12669aSbellard                                   unsigned long size,
17732e12669aSbellard                                   unsigned long phys_offset)
177433417e70Sbellard {
1775108c49b8Sbellard     target_phys_addr_t addr, end_addr;
177692e873b9Sbellard     PhysPageDesc *p;
177733417e70Sbellard 
17785fd386f6Sbellard     size = (size + TARGET_PAGE_SIZE - 1) & TARGET_PAGE_MASK;
177933417e70Sbellard     end_addr = start_addr + size;
17805fd386f6Sbellard     for(addr = start_addr; addr != end_addr; addr += TARGET_PAGE_SIZE) {
1781108c49b8Sbellard         p = phys_page_find_alloc(addr >> TARGET_PAGE_BITS, 1);
17829fa3e853Sbellard         p->phys_offset = phys_offset;
17839fa3e853Sbellard         if ((phys_offset & ~TARGET_PAGE_MASK) <= IO_MEM_ROM)
178433417e70Sbellard             phys_offset += TARGET_PAGE_SIZE;
178533417e70Sbellard     }
178633417e70Sbellard }
178733417e70Sbellard 
/* reads from unassigned physical memory return zero */
static uint32_t unassigned_mem_readb(void *opaque, target_phys_addr_t addr)
{
    return 0;
}
179233417e70Sbellard 
/* writes to unassigned physical memory are silently discarded */
static void unassigned_mem_writeb(void *opaque, target_phys_addr_t addr, uint32_t val)
{
}
179633417e70Sbellard 
/* byte/word/long read handlers for unassigned memory (all widths use
   the same zero-returning handler) */
static CPUReadMemoryFunc *unassigned_mem_read[3] = {
    unassigned_mem_readb,
    unassigned_mem_readb,
    unassigned_mem_readb,
};
180233417e70Sbellard 
/* byte/word/long write handlers for unassigned memory (all widths use
   the same discarding handler) */
static CPUWriteMemoryFunc *unassigned_mem_write[3] = {
    unassigned_mem_writeb,
    unassigned_mem_writeb,
    unassigned_mem_writeb,
};
180833417e70Sbellard 
/* byte write to a page marked not-dirty: invalidate translated code on
   the page if needed, perform the store, update the dirty bitmap, and
   drop the slow-path trap once the page is fully dirty. */
static void notdirty_mem_writeb(void *opaque, target_phys_addr_t addr, uint32_t val)
{
    unsigned long ram_addr;
    int dirty_flags;
    ram_addr = addr - (unsigned long)phys_ram_base;
    dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
    if (!(dirty_flags & CODE_DIRTY_FLAG)) {
#if !defined(CONFIG_USER_ONLY)
        /* the page may contain translated code: flush TBs covering the
           1 byte being written, then reload the (updated) flags */
        tb_invalidate_phys_page_fast(ram_addr, 1);
        dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
#endif
    }
    stb_p((uint8_t *)(long)addr, val);
#ifdef USE_KQEMU
    if (cpu_single_env->kqemu_enabled &&
        (dirty_flags & KQEMU_MODIFY_PAGE_MASK) != KQEMU_MODIFY_PAGE_MASK)
        kqemu_modify_page(cpu_single_env, ram_addr);
#endif
    /* mark every dirty bit except CODE_DIRTY, which only the TB flush
       above may set */
    dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
    phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] = dirty_flags;
    /* we remove the notdirty callback only if the code has been
       flushed */
    if (dirty_flags == 0xff)
        tlb_set_dirty(cpu_single_env, addr, cpu_single_env->mem_write_vaddr);
}
18341ccde1cbSbellard 
/* 16-bit variant of notdirty_mem_writeb: same dirty-tracking protocol
   with a 2-byte invalidation span and stw_p store. */
static void notdirty_mem_writew(void *opaque, target_phys_addr_t addr, uint32_t val)
{
    unsigned long ram_addr;
    int dirty_flags;
    ram_addr = addr - (unsigned long)phys_ram_base;
    dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
    if (!(dirty_flags & CODE_DIRTY_FLAG)) {
#if !defined(CONFIG_USER_ONLY)
        /* flush TBs covering the 2 bytes being written */
        tb_invalidate_phys_page_fast(ram_addr, 2);
        dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
#endif
    }
    stw_p((uint8_t *)(long)addr, val);
#ifdef USE_KQEMU
    if (cpu_single_env->kqemu_enabled &&
        (dirty_flags & KQEMU_MODIFY_PAGE_MASK) != KQEMU_MODIFY_PAGE_MASK)
        kqemu_modify_page(cpu_single_env, ram_addr);
#endif
    dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
    phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] = dirty_flags;
    /* we remove the notdirty callback only if the code has been
       flushed */
    if (dirty_flags == 0xff)
        tlb_set_dirty(cpu_single_env, addr, cpu_single_env->mem_write_vaddr);
}
18601ccde1cbSbellard 
/* 32-bit variant of notdirty_mem_writeb: same dirty-tracking protocol
   with a 4-byte invalidation span and stl_p store. */
static void notdirty_mem_writel(void *opaque, target_phys_addr_t addr, uint32_t val)
{
    unsigned long ram_addr;
    int dirty_flags;
    ram_addr = addr - (unsigned long)phys_ram_base;
    dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
    if (!(dirty_flags & CODE_DIRTY_FLAG)) {
#if !defined(CONFIG_USER_ONLY)
        /* flush TBs covering the 4 bytes being written */
        tb_invalidate_phys_page_fast(ram_addr, 4);
        dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
#endif
    }
    stl_p((uint8_t *)(long)addr, val);
#ifdef USE_KQEMU
    if (cpu_single_env->kqemu_enabled &&
        (dirty_flags & KQEMU_MODIFY_PAGE_MASK) != KQEMU_MODIFY_PAGE_MASK)
        kqemu_modify_page(cpu_single_env, ram_addr);
#endif
    dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
    phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] = dirty_flags;
    /* we remove the notdirty callback only if the code has been
       flushed */
    if (dirty_flags == 0xff)
        tlb_set_dirty(cpu_single_env, addr, cpu_single_env->mem_write_vaddr);
}
18861ccde1cbSbellard 
/* placeholder read table for region types whose reads never reach the
   I/O dispatch path (they are served by the fast RAM path instead) */
static CPUReadMemoryFunc *error_mem_read[3] = {
    NULL, /* never used */
    NULL, /* never used */
    NULL, /* never used */
};
18923a7d929eSbellard 
/* byte/word/long write handlers installed for NOTDIRTY pages */
static CPUWriteMemoryFunc *notdirty_mem_write[3] = {
    notdirty_mem_writeb,
    notdirty_mem_writew,
    notdirty_mem_writel,
};
18981ccde1cbSbellard 
/* register the built-in I/O region types and allocate the dirty-page
   bitmap: ROM ignores writes, UNASSIGNED reads as zero, NOTDIRTY traps
   writes for dirty tracking. */
static void io_mem_init(void)
{
    cpu_register_io_memory(IO_MEM_ROM >> IO_MEM_SHIFT, error_mem_read, unassigned_mem_write, NULL);
    cpu_register_io_memory(IO_MEM_UNASSIGNED >> IO_MEM_SHIFT, unassigned_mem_read, unassigned_mem_write, NULL);
    cpu_register_io_memory(IO_MEM_NOTDIRTY >> IO_MEM_SHIFT, error_mem_read, notdirty_mem_write, NULL);
    /* first slot available for dynamic registration, past the
       built-in entries above */
    io_mem_nb = 5;

    /* alloc dirty bits array */
    phys_ram_dirty = qemu_vmalloc(phys_ram_size >> TARGET_PAGE_BITS);
    /* all pages start fully dirty (one flag byte per target page) */
    memset(phys_ram_dirty, 0xff, phys_ram_size >> TARGET_PAGE_BITS);
}
191033417e70Sbellard 
191133417e70Sbellard /* mem_read and mem_write are arrays of functions containing the
191233417e70Sbellard    function to access byte (index 0), word (index 1) and dword (index
191333417e70Sbellard    2). All functions must be supplied. If io_index is non zero, the
191433417e70Sbellard    corresponding io zone is modified. If it is zero, a new io zone is
191533417e70Sbellard    allocated. The return value can be used with
191633417e70Sbellard    cpu_register_physical_memory(). (-1) is returned if error. */
191733417e70Sbellard int cpu_register_io_memory(int io_index,
191833417e70Sbellard                            CPUReadMemoryFunc **mem_read,
1919a4193c8aSbellard                            CPUWriteMemoryFunc **mem_write,
1920a4193c8aSbellard                            void *opaque)
192133417e70Sbellard {
192233417e70Sbellard     int i;
192333417e70Sbellard 
192433417e70Sbellard     if (io_index <= 0) {
1925b5ff1b31Sbellard         if (io_mem_nb >= IO_MEM_NB_ENTRIES)
192633417e70Sbellard             return -1;
192733417e70Sbellard         io_index = io_mem_nb++;
192833417e70Sbellard     } else {
192933417e70Sbellard         if (io_index >= IO_MEM_NB_ENTRIES)
193033417e70Sbellard             return -1;
193133417e70Sbellard     }
193233417e70Sbellard 
193333417e70Sbellard     for(i = 0;i < 3; i++) {
193433417e70Sbellard         io_mem_read[io_index][i] = mem_read[i];
193533417e70Sbellard         io_mem_write[io_index][i] = mem_write[i];
193633417e70Sbellard     }
1937a4193c8aSbellard     io_mem_opaque[io_index] = opaque;
193833417e70Sbellard     return io_index << IO_MEM_SHIFT;
193933417e70Sbellard }
194061382a50Sbellard 
19418926b517Sbellard CPUWriteMemoryFunc **cpu_get_io_memory_write(int io_index)
19428926b517Sbellard {
19438926b517Sbellard     return io_mem_write[io_index >> IO_MEM_SHIFT];
19448926b517Sbellard }
19458926b517Sbellard 
19468926b517Sbellard CPUReadMemoryFunc **cpu_get_io_memory_read(int io_index)
19478926b517Sbellard {
19488926b517Sbellard     return io_mem_read[io_index >> IO_MEM_SHIFT];
19498926b517Sbellard }
19508926b517Sbellard 
195113eb76e0Sbellard /* physical memory access (slow version, mainly for debug) */
195213eb76e0Sbellard #if defined(CONFIG_USER_ONLY)
19532e12669aSbellard void cpu_physical_memory_rw(target_phys_addr_t addr, uint8_t *buf,
195413eb76e0Sbellard                             int len, int is_write)
195513eb76e0Sbellard {
195613eb76e0Sbellard     int l, flags;
195713eb76e0Sbellard     target_ulong page;
195853a5960aSpbrook     void * p;
195913eb76e0Sbellard 
196013eb76e0Sbellard     while (len > 0) {
196113eb76e0Sbellard         page = addr & TARGET_PAGE_MASK;
196213eb76e0Sbellard         l = (page + TARGET_PAGE_SIZE) - addr;
196313eb76e0Sbellard         if (l > len)
196413eb76e0Sbellard             l = len;
196513eb76e0Sbellard         flags = page_get_flags(page);
196613eb76e0Sbellard         if (!(flags & PAGE_VALID))
196713eb76e0Sbellard             return;
196813eb76e0Sbellard         if (is_write) {
196913eb76e0Sbellard             if (!(flags & PAGE_WRITE))
197013eb76e0Sbellard                 return;
197153a5960aSpbrook             p = lock_user(addr, len, 0);
197253a5960aSpbrook             memcpy(p, buf, len);
197353a5960aSpbrook             unlock_user(p, addr, len);
197413eb76e0Sbellard         } else {
197513eb76e0Sbellard             if (!(flags & PAGE_READ))
197613eb76e0Sbellard                 return;
197753a5960aSpbrook             p = lock_user(addr, len, 1);
197853a5960aSpbrook             memcpy(buf, p, len);
197953a5960aSpbrook             unlock_user(p, addr, 0);
198013eb76e0Sbellard         }
198113eb76e0Sbellard         len -= l;
198213eb76e0Sbellard         buf += l;
198313eb76e0Sbellard         addr += l;
198413eb76e0Sbellard     }
198513eb76e0Sbellard }
19868df1cd07Sbellard 
198713eb76e0Sbellard #else
/* Slow-path physical memory read/write for system emulation.  The
   transfer is split at target page boundaries; each page either maps
   to host RAM (direct memcpy, with translated-code invalidation and
   dirty-bit update on writes) or is dispatched to the registered I/O
   handlers using the widest naturally aligned access (4/2/1 bytes)
   that fits in the remaining length. */
void cpu_physical_memory_rw(target_phys_addr_t addr, uint8_t *buf,
                            int len, int is_write)
{
    int l, io_index;
    uint8_t *ptr;
    uint32_t val;
    target_phys_addr_t page;
    unsigned long pd;
    PhysPageDesc *p;

    while (len > 0) {
        page = addr & TARGET_PAGE_MASK;
        /* bytes remaining in the current target page */
        l = (page + TARGET_PAGE_SIZE) - addr;
        if (l > len)
            l = len;
        p = phys_page_find(page >> TARGET_PAGE_BITS);
        if (!p) {
            /* unmapped pages behave like unassigned I/O */
            pd = IO_MEM_UNASSIGNED;
        } else {
            pd = p->phys_offset;
        }

        if (is_write) {
            /* anything that is not plain RAM (ROM included) goes
               through the I/O handlers; ROM writes end up in
               unassigned_mem_write and are thus discarded */
            if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
                io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
                /* XXX: could force cpu_single_env to NULL to avoid
                   potential bugs */
                if (l >= 4 && ((addr & 3) == 0)) {
                    /* 32 bit write access */
                    val = ldl_p(buf);
                    io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
                    l = 4;
                } else if (l >= 2 && ((addr & 1) == 0)) {
                    /* 16 bit write access */
                    val = lduw_p(buf);
                    io_mem_write[io_index][1](io_mem_opaque[io_index], addr, val);
                    l = 2;
                } else {
                    /* 8 bit write access */
                    val = ldub_p(buf);
                    io_mem_write[io_index][0](io_mem_opaque[io_index], addr, val);
                    l = 1;
                }
            } else {
                unsigned long addr1;
                addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
                /* RAM case */
                ptr = phys_ram_base + addr1;
                memcpy(ptr, buf, l);
                if (!cpu_physical_memory_is_dirty(addr1)) {
                    /* invalidate code */
                    tb_invalidate_phys_page_range(addr1, addr1 + l, 0);
                    /* set dirty bit */
                    phys_ram_dirty[addr1 >> TARGET_PAGE_BITS] |=
                        (0xff & ~CODE_DIRTY_FLAG);
                }
            }
        } else {
            if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM) {
                /* I/O case */
                io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
                if (l >= 4 && ((addr & 3) == 0)) {
                    /* 32 bit read access */
                    val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr);
                    stl_p(buf, val);
                    l = 4;
                } else if (l >= 2 && ((addr & 1) == 0)) {
                    /* 16 bit read access */
                    val = io_mem_read[io_index][1](io_mem_opaque[io_index], addr);
                    stw_p(buf, val);
                    l = 2;
                } else {
                    /* 8 bit read access */
                    val = io_mem_read[io_index][0](io_mem_opaque[io_index], addr);
                    stb_p(buf, val);
                    l = 1;
                }
            } else {
                /* RAM case (ROM reads are also served directly) */
                ptr = phys_ram_base + (pd & TARGET_PAGE_MASK) +
                    (addr & ~TARGET_PAGE_MASK);
                memcpy(buf, ptr, l);
            }
        }
        len -= l;
        buf += l;
        addr += l;
    }
}
20778df1cd07Sbellard 
20788df1cd07Sbellard /* warning: addr must be aligned */
20798df1cd07Sbellard uint32_t ldl_phys(target_phys_addr_t addr)
20808df1cd07Sbellard {
20818df1cd07Sbellard     int io_index;
20828df1cd07Sbellard     uint8_t *ptr;
20838df1cd07Sbellard     uint32_t val;
20848df1cd07Sbellard     unsigned long pd;
20858df1cd07Sbellard     PhysPageDesc *p;
20868df1cd07Sbellard 
20878df1cd07Sbellard     p = phys_page_find(addr >> TARGET_PAGE_BITS);
20888df1cd07Sbellard     if (!p) {
20898df1cd07Sbellard         pd = IO_MEM_UNASSIGNED;
20908df1cd07Sbellard     } else {
20918df1cd07Sbellard         pd = p->phys_offset;
20928df1cd07Sbellard     }
20938df1cd07Sbellard 
20943a7d929eSbellard     if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM) {
20958df1cd07Sbellard         /* I/O case */
20968df1cd07Sbellard         io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
20978df1cd07Sbellard         val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr);
20988df1cd07Sbellard     } else {
20998df1cd07Sbellard         /* RAM case */
21008df1cd07Sbellard         ptr = phys_ram_base + (pd & TARGET_PAGE_MASK) +
21018df1cd07Sbellard             (addr & ~TARGET_PAGE_MASK);
21028df1cd07Sbellard         val = ldl_p(ptr);
21038df1cd07Sbellard     }
21048df1cd07Sbellard     return val;
21058df1cd07Sbellard }
21068df1cd07Sbellard 
/* Load a 64 bit value from guest physical memory.
   warning: addr must be aligned */
uint64_t ldq_phys(target_phys_addr_t addr)
{
    int io_index;
    uint8_t *ptr;
    uint64_t val;
    unsigned long pd;
    PhysPageDesc *p;

    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    if (!p) {
        /* unmapped pages behave like unassigned I/O */
        pd = IO_MEM_UNASSIGNED;
    } else {
        pd = p->phys_offset;
    }

    if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM) {
        /* I/O case: there is no 64 bit accessor slot, so the value is
           assembled from two 32 bit reads ordered per the target's
           endianness (the access is therefore not atomic) */
        io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
#ifdef TARGET_WORDS_BIGENDIAN
        val = (uint64_t)io_mem_read[io_index][2](io_mem_opaque[io_index], addr) << 32;
        val |= io_mem_read[io_index][2](io_mem_opaque[io_index], addr + 4);
#else
        val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr);
        val |= (uint64_t)io_mem_read[io_index][2](io_mem_opaque[io_index], addr + 4) << 32;
#endif
    } else {
        /* RAM case */
        ptr = phys_ram_base + (pd & TARGET_PAGE_MASK) +
            (addr & ~TARGET_PAGE_MASK);
        val = ldq_p(ptr);
    }
    return val;
}
214184b7b8e7Sbellard 
2142aab33094Sbellard /* XXX: optimize */
2143aab33094Sbellard uint32_t ldub_phys(target_phys_addr_t addr)
2144aab33094Sbellard {
2145aab33094Sbellard     uint8_t val;
2146aab33094Sbellard     cpu_physical_memory_read(addr, &val, 1);
2147aab33094Sbellard     return val;
2148aab33094Sbellard }
2149aab33094Sbellard 
2150aab33094Sbellard /* XXX: optimize */
2151aab33094Sbellard uint32_t lduw_phys(target_phys_addr_t addr)
2152aab33094Sbellard {
2153aab33094Sbellard     uint16_t val;
2154aab33094Sbellard     cpu_physical_memory_read(addr, (uint8_t *)&val, 2);
2155aab33094Sbellard     return tswap16(val);
2156aab33094Sbellard }
2157aab33094Sbellard 
/* warning: addr must be aligned. The ram page is not masked as dirty
   and the code inside is not invalidated. It is useful if the dirty
   bits are used to track modified PTEs */
void stl_phys_notdirty(target_phys_addr_t addr, uint32_t val)
{
    int io_index;
    uint8_t *ptr;
    unsigned long pd;
    PhysPageDesc *p;

    /* resolve the physical page to either RAM or an I/O slot */
    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    if (!p) {
        pd = IO_MEM_UNASSIGNED;
    } else {
        pd = p->phys_offset;
    }

    if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
        /* not plain RAM: dispatch to the 32 bit I/O write handler */
        io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
        io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
    } else {
        /* direct store into host RAM, deliberately skipping the
           dirty-bit update and code invalidation done by stl_phys() */
        ptr = phys_ram_base + (pd & TARGET_PAGE_MASK) +
            (addr & ~TARGET_PAGE_MASK);
        stl_p(ptr, val);
    }
}
21848df1cd07Sbellard 
/* Store a 32 bit value to guest physical memory; RAM stores also
   invalidate translated code on the page and update the dirty bits.
   warning: addr must be aligned */
void stl_phys(target_phys_addr_t addr, uint32_t val)
{
    int io_index;
    uint8_t *ptr;
    unsigned long pd;
    PhysPageDesc *p;

    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    if (!p) {
        /* unmapped pages behave like unassigned I/O */
        pd = IO_MEM_UNASSIGNED;
    } else {
        pd = p->phys_offset;
    }

    if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
        /* not plain RAM: dispatch to the 32 bit I/O write handler */
        io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
        io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
    } else {
        unsigned long addr1;
        addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
        /* RAM case */
        ptr = phys_ram_base + addr1;
        stl_p(ptr, val);
        if (!cpu_physical_memory_is_dirty(addr1)) {
            /* invalidate code */
            tb_invalidate_phys_page_range(addr1, addr1 + 4, 0);
            /* set dirty bit */
            phys_ram_dirty[addr1 >> TARGET_PAGE_BITS] |=
                (0xff & ~CODE_DIRTY_FLAG);
        }
    }
}
22188df1cd07Sbellard 
2219aab33094Sbellard /* XXX: optimize */
2220aab33094Sbellard void stb_phys(target_phys_addr_t addr, uint32_t val)
2221aab33094Sbellard {
2222aab33094Sbellard     uint8_t v = val;
2223aab33094Sbellard     cpu_physical_memory_write(addr, &v, 1);
2224aab33094Sbellard }
2225aab33094Sbellard 
2226aab33094Sbellard /* XXX: optimize */
2227aab33094Sbellard void stw_phys(target_phys_addr_t addr, uint32_t val)
2228aab33094Sbellard {
2229aab33094Sbellard     uint16_t v = tswap16(val);
2230aab33094Sbellard     cpu_physical_memory_write(addr, (const uint8_t *)&v, 2);
2231aab33094Sbellard }
2232aab33094Sbellard 
2233aab33094Sbellard /* XXX: optimize */
2234aab33094Sbellard void stq_phys(target_phys_addr_t addr, uint64_t val)
2235aab33094Sbellard {
2236aab33094Sbellard     val = tswap64(val);
2237aab33094Sbellard     cpu_physical_memory_write(addr, (const uint8_t *)&val, 8);
2238aab33094Sbellard }
2239aab33094Sbellard 
224013eb76e0Sbellard #endif
224113eb76e0Sbellard 
224213eb76e0Sbellard /* virtual memory access for debug */
2243b448f2f3Sbellard int cpu_memory_rw_debug(CPUState *env, target_ulong addr,
2244b448f2f3Sbellard                         uint8_t *buf, int len, int is_write)
224513eb76e0Sbellard {
224613eb76e0Sbellard     int l;
224713eb76e0Sbellard     target_ulong page, phys_addr;
224813eb76e0Sbellard 
224913eb76e0Sbellard     while (len > 0) {
225013eb76e0Sbellard         page = addr & TARGET_PAGE_MASK;
225113eb76e0Sbellard         phys_addr = cpu_get_phys_page_debug(env, page);
225213eb76e0Sbellard         /* if no physical page mapped, return an error */
225313eb76e0Sbellard         if (phys_addr == -1)
225413eb76e0Sbellard             return -1;
225513eb76e0Sbellard         l = (page + TARGET_PAGE_SIZE) - addr;
225613eb76e0Sbellard         if (l > len)
225713eb76e0Sbellard             l = len;
2258b448f2f3Sbellard         cpu_physical_memory_rw(phys_addr + (addr & ~TARGET_PAGE_MASK),
2259b448f2f3Sbellard                                buf, l, is_write);
226013eb76e0Sbellard         len -= l;
226113eb76e0Sbellard         buf += l;
226213eb76e0Sbellard         addr += l;
226313eb76e0Sbellard     }
226413eb76e0Sbellard     return 0;
226513eb76e0Sbellard }
226613eb76e0Sbellard 
/* Print translation-block and TLB statistics to 'f' through the
   supplied fprintf-like callback (NOTE(review): presumably invoked by
   the monitor -- confirm against the caller). */
void dump_exec_info(FILE *f,
                    int (*cpu_fprintf)(FILE *f, const char *fmt, ...))
{
    int i, target_code_size, max_target_code_size;
    int direct_jmp_count, direct_jmp2_count, cross_page;
    TranslationBlock *tb;

    target_code_size = 0;
    max_target_code_size = 0;
    cross_page = 0;
    direct_jmp_count = 0;
    direct_jmp2_count = 0;
    /* gather statistics over all currently translated blocks */
    for(i = 0; i < nb_tbs; i++) {
        tb = &tbs[i];
        target_code_size += tb->size;
        if (tb->size > max_target_code_size)
            max_target_code_size = tb->size;
        /* TBs whose code spans two guest pages */
        if (tb->page_addr[1] != -1)
            cross_page++;
        /* 0xffff marks an unpatched direct-jump slot */
        if (tb->tb_next_offset[0] != 0xffff) {
            direct_jmp_count++;
            if (tb->tb_next_offset[1] != 0xffff) {
                direct_jmp2_count++;
            }
        }
    }
    /* XXX: avoid using doubles ? */
    cpu_fprintf(f, "TB count            %d\n", nb_tbs);
    cpu_fprintf(f, "TB avg target size  %d max=%d bytes\n",
                nb_tbs ? target_code_size / nb_tbs : 0,
                max_target_code_size);
    cpu_fprintf(f, "TB avg host size    %d bytes (expansion ratio: %0.1f)\n",
                nb_tbs ? (code_gen_ptr - code_gen_buffer) / nb_tbs : 0,
                target_code_size ? (double) (code_gen_ptr - code_gen_buffer) / target_code_size : 0);
    cpu_fprintf(f, "cross page TB count %d (%d%%)\n",
            cross_page,
            nb_tbs ? (cross_page * 100) / nb_tbs : 0);
    cpu_fprintf(f, "direct jump count   %d (%d%%) (2 jumps=%d %d%%)\n",
                direct_jmp_count,
                nb_tbs ? (direct_jmp_count * 100) / nb_tbs : 0,
                direct_jmp2_count,
                nb_tbs ? (direct_jmp2_count * 100) / nb_tbs : 0);
    cpu_fprintf(f, "TB flush count      %d\n", tb_flush_count);
    cpu_fprintf(f, "TB invalidate count %d\n", tb_phys_invalidate_count);
    cpu_fprintf(f, "TLB flush count     %d\n", tlb_flush_count);
}
2313e3db7226Sbellard 
231461382a50Sbellard #if !defined(CONFIG_USER_ONLY)
231561382a50Sbellard 
231661382a50Sbellard #define MMUSUFFIX _cmmu
231761382a50Sbellard #define GETPC() NULL
231861382a50Sbellard #define env cpu_single_env
2319b769d8feSbellard #define SOFTMMU_CODE_ACCESS
232061382a50Sbellard 
232161382a50Sbellard #define SHIFT 0
232261382a50Sbellard #include "softmmu_template.h"
232361382a50Sbellard 
232461382a50Sbellard #define SHIFT 1
232561382a50Sbellard #include "softmmu_template.h"
232661382a50Sbellard 
232761382a50Sbellard #define SHIFT 2
232861382a50Sbellard #include "softmmu_template.h"
232961382a50Sbellard 
233061382a50Sbellard #define SHIFT 3
233161382a50Sbellard #include "softmmu_template.h"
233261382a50Sbellard 
233361382a50Sbellard #undef env
233461382a50Sbellard 
233561382a50Sbellard #endif
2336