xref: /qemu/system/physmem.c (revision 8a40a180d39ec535b16a9456965a59722950cee2)
154936004Sbellard /*
2fd6ce8f6Sbellard  *  virtual page mapping and translated block handling
354936004Sbellard  *
454936004Sbellard  *  Copyright (c) 2003 Fabrice Bellard
554936004Sbellard  *
654936004Sbellard  * This library is free software; you can redistribute it and/or
754936004Sbellard  * modify it under the terms of the GNU Lesser General Public
854936004Sbellard  * License as published by the Free Software Foundation; either
954936004Sbellard  * version 2 of the License, or (at your option) any later version.
1054936004Sbellard  *
1154936004Sbellard  * This library is distributed in the hope that it will be useful,
1254936004Sbellard  * but WITHOUT ANY WARRANTY; without even the implied warranty of
1354936004Sbellard  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
1454936004Sbellard  * Lesser General Public License for more details.
1554936004Sbellard  *
1654936004Sbellard  * You should have received a copy of the GNU Lesser General Public
1754936004Sbellard  * License along with this library; if not, write to the Free Software
1854936004Sbellard  * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
1954936004Sbellard  */
2067b915a5Sbellard #include "config.h"
21d5a8f07cSbellard #ifdef _WIN32
22d5a8f07cSbellard #include <windows.h>
23d5a8f07cSbellard #else
24a98d49b1Sbellard #include <sys/types.h>
25d5a8f07cSbellard #include <sys/mman.h>
26d5a8f07cSbellard #endif
2754936004Sbellard #include <stdlib.h>
2854936004Sbellard #include <stdio.h>
2954936004Sbellard #include <stdarg.h>
3054936004Sbellard #include <string.h>
3154936004Sbellard #include <errno.h>
3254936004Sbellard #include <unistd.h>
3354936004Sbellard #include <inttypes.h>
3454936004Sbellard 
356180a181Sbellard #include "cpu.h"
366180a181Sbellard #include "exec-all.h"
3754936004Sbellard 
38fd6ce8f6Sbellard //#define DEBUG_TB_INVALIDATE
3966e85a21Sbellard //#define DEBUG_FLUSH
409fa3e853Sbellard //#define DEBUG_TLB
41fd6ce8f6Sbellard 
42fd6ce8f6Sbellard /* make various TB consistency checks */
43fd6ce8f6Sbellard //#define DEBUG_TB_CHECK
4498857888Sbellard //#define DEBUG_TLB_CHECK
45fd6ce8f6Sbellard 
46fd6ce8f6Sbellard /* threshold to flush the translated code buffer */
47fd6ce8f6Sbellard #define CODE_GEN_BUFFER_MAX_SIZE (CODE_GEN_BUFFER_SIZE - CODE_GEN_MAX_SIZE)
48fd6ce8f6Sbellard 
499fa3e853Sbellard #define SMC_BITMAP_USE_THRESHOLD 10
509fa3e853Sbellard 
519fa3e853Sbellard #define MMAP_AREA_START        0x00000000
529fa3e853Sbellard #define MMAP_AREA_END          0xa8000000
53fd6ce8f6Sbellard 
54108c49b8Sbellard #if defined(TARGET_SPARC64)
55108c49b8Sbellard #define TARGET_PHYS_ADDR_SPACE_BITS 41
56108c49b8Sbellard #elif defined(TARGET_PPC64)
57108c49b8Sbellard #define TARGET_PHYS_ADDR_SPACE_BITS 42
58108c49b8Sbellard #else
59108c49b8Sbellard /* Note: for compatibility with kqemu, we use 32 bits for x86_64 */
60108c49b8Sbellard #define TARGET_PHYS_ADDR_SPACE_BITS 32
61108c49b8Sbellard #endif
62108c49b8Sbellard 
63fd6ce8f6Sbellard TranslationBlock tbs[CODE_GEN_MAX_BLOCKS];
649fa3e853Sbellard TranslationBlock *tb_phys_hash[CODE_GEN_PHYS_HASH_SIZE];
65fd6ce8f6Sbellard int nb_tbs;
66eb51d102Sbellard /* any access to the tbs or the page table must use this lock */
67eb51d102Sbellard spinlock_t tb_lock = SPIN_LOCK_UNLOCKED;
68fd6ce8f6Sbellard 
69b8076a74Sbellard uint8_t code_gen_buffer[CODE_GEN_BUFFER_SIZE] __attribute__((aligned (32)));
70fd6ce8f6Sbellard uint8_t *code_gen_ptr;
71fd6ce8f6Sbellard 
729fa3e853Sbellard int phys_ram_size;
739fa3e853Sbellard int phys_ram_fd;
749fa3e853Sbellard uint8_t *phys_ram_base;
751ccde1cbSbellard uint8_t *phys_ram_dirty;
769fa3e853Sbellard 
/* Per-target-page bookkeeping for translated code living in that page. */
typedef struct PageDesc {
    /* list of TBs intersecting this ram page */
    TranslationBlock *first_tb;
    /* in order to optimize self modifying code, we count the number
       of lookups we do to a given page to use a bitmap */
    unsigned int code_write_count;
    /* bitmap of code locations in the page (built lazily by
       build_page_bitmap), or NULL when not yet constructed */
    uint8_t *code_bitmap;
#if defined(CONFIG_USER_ONLY)
    /* page protection flags (user-mode emulation only) */
    unsigned long flags;
#endif
} PageDesc;
8854936004Sbellard 
/* Descriptor for one physical page in the l1_phys_map tables. */
typedef struct PhysPageDesc {
    /* offset in host memory of the page + io_index in the low 12 bits */
    uint32_t phys_offset;
} PhysPageDesc;
9392e873b9Sbellard 
9454936004Sbellard #define L2_BITS 10
9554936004Sbellard #define L1_BITS (32 - L2_BITS - TARGET_PAGE_BITS)
9654936004Sbellard 
9754936004Sbellard #define L1_SIZE (1 << L1_BITS)
9854936004Sbellard #define L2_SIZE (1 << L2_BITS)
9954936004Sbellard 
10033417e70Sbellard static void io_mem_init(void);
101fd6ce8f6Sbellard 
10283fb7adfSbellard unsigned long qemu_real_host_page_size;
10383fb7adfSbellard unsigned long qemu_host_page_bits;
10483fb7adfSbellard unsigned long qemu_host_page_size;
10583fb7adfSbellard unsigned long qemu_host_page_mask;
10654936004Sbellard 
10792e873b9Sbellard /* XXX: for system emulation, it could just be an array */
10854936004Sbellard static PageDesc *l1_map[L1_SIZE];
1090a962c02Sbellard PhysPageDesc **l1_phys_map;
11054936004Sbellard 
11133417e70Sbellard /* io memory support */
11233417e70Sbellard CPUWriteMemoryFunc *io_mem_write[IO_MEM_NB_ENTRIES][4];
11333417e70Sbellard CPUReadMemoryFunc *io_mem_read[IO_MEM_NB_ENTRIES][4];
114a4193c8aSbellard void *io_mem_opaque[IO_MEM_NB_ENTRIES];
11533417e70Sbellard static int io_mem_nb;
11633417e70Sbellard 
11734865134Sbellard /* log support */
11834865134Sbellard char *logfilename = "/tmp/qemu.log";
11934865134Sbellard FILE *logfile;
12034865134Sbellard int loglevel;
12134865134Sbellard 
122e3db7226Sbellard /* statistics */
123e3db7226Sbellard static int tlb_flush_count;
124e3db7226Sbellard static int tb_flush_count;
125e3db7226Sbellard static int tb_phys_invalidate_count;
126e3db7226Sbellard 
/* Determine the host page geometry and make the static translated-code
   buffer executable.  Called once from cpu_exec_init(). */
static void page_init(void)
{
    /* NOTE: we can always suppose that qemu_host_page_size >=
       TARGET_PAGE_SIZE */
#ifdef _WIN32
    {
        SYSTEM_INFO system_info;
        DWORD old_protect;

        GetSystemInfo(&system_info);
        qemu_real_host_page_size = system_info.dwPageSize;

        /* allow execution of generated code in the static buffer */
        VirtualProtect(code_gen_buffer, sizeof(code_gen_buffer),
                       PAGE_EXECUTE_READWRITE, &old_protect);
    }
#else
    qemu_real_host_page_size = getpagesize();
    {
        unsigned long start, end;

        /* mprotect needs page-aligned bounds: round the buffer start
           down and its end up to host page boundaries */
        start = (unsigned long)code_gen_buffer;
        start &= ~(qemu_real_host_page_size - 1);

        end = (unsigned long)code_gen_buffer + sizeof(code_gen_buffer);
        end += qemu_real_host_page_size - 1;
        end &= ~(qemu_real_host_page_size - 1);

        mprotect((void *)start, end - start,
                 PROT_READ | PROT_WRITE | PROT_EXEC);
    }
#endif

    /* qemu_host_page_size may have been forced by the caller; default
       it to the real host page size, but never below the target's */
    if (qemu_host_page_size == 0)
        qemu_host_page_size = qemu_real_host_page_size;
    if (qemu_host_page_size < TARGET_PAGE_SIZE)
        qemu_host_page_size = TARGET_PAGE_SIZE;
    qemu_host_page_bits = 0;
    while ((1 << qemu_host_page_bits) < qemu_host_page_size)
        qemu_host_page_bits++;
    qemu_host_page_mask = ~(qemu_host_page_size - 1);
    /* top level of the physical page descriptor tables */
    l1_phys_map = qemu_vmalloc(L1_SIZE * sizeof(void *));
    memset(l1_phys_map, 0, L1_SIZE * sizeof(void *));
}
17054936004Sbellard 
171fd6ce8f6Sbellard static inline PageDesc *page_find_alloc(unsigned int index)
17254936004Sbellard {
17354936004Sbellard     PageDesc **lp, *p;
17454936004Sbellard 
17554936004Sbellard     lp = &l1_map[index >> L2_BITS];
17654936004Sbellard     p = *lp;
17754936004Sbellard     if (!p) {
17854936004Sbellard         /* allocate if not found */
17959817ccbSbellard         p = qemu_malloc(sizeof(PageDesc) * L2_SIZE);
180fd6ce8f6Sbellard         memset(p, 0, sizeof(PageDesc) * L2_SIZE);
18154936004Sbellard         *lp = p;
18254936004Sbellard     }
18354936004Sbellard     return p + (index & (L2_SIZE - 1));
18454936004Sbellard }
18554936004Sbellard 
186fd6ce8f6Sbellard static inline PageDesc *page_find(unsigned int index)
18754936004Sbellard {
18854936004Sbellard     PageDesc *p;
18954936004Sbellard 
19054936004Sbellard     p = l1_map[index >> L2_BITS];
19154936004Sbellard     if (!p)
19254936004Sbellard         return 0;
193fd6ce8f6Sbellard     return p + (index & (L2_SIZE - 1));
19454936004Sbellard }
19554936004Sbellard 
/* Return the PhysPageDesc for physical page 'index', optionally
   allocating missing intermediate table levels.  Returns NULL when
   'alloc' is 0 and a level is absent.  With more than 32 physical
   address bits an extra top level is traversed first. */
static PhysPageDesc *phys_page_find_alloc(target_phys_addr_t index, int alloc)
{
    void **lp, **p;

    p = (void **)l1_phys_map;
#if TARGET_PHYS_ADDR_SPACE_BITS > 32

#if TARGET_PHYS_ADDR_SPACE_BITS > (32 + L1_BITS)
#error unsupported TARGET_PHYS_ADDR_SPACE_BITS
#endif
    /* extra indirection level for wide physical address spaces */
    lp = p + ((index >> (L1_BITS + L2_BITS)) & (L1_SIZE - 1));
    p = *lp;
    if (!p) {
        /* allocate if not found */
        if (!alloc)
            return NULL;
        p = qemu_vmalloc(sizeof(void *) * L1_SIZE);
        memset(p, 0, sizeof(void *) * L1_SIZE);
        *lp = p;
    }
#endif
    /* second level: array of PhysPageDesc */
    lp = p + ((index >> L2_BITS) & (L1_SIZE - 1));
    p = *lp;
    if (!p) {
        /* allocate if not found */
        if (!alloc)
            return NULL;
        p = qemu_vmalloc(sizeof(PhysPageDesc) * L2_SIZE);
        memset(p, 0, sizeof(PhysPageDesc) * L2_SIZE);
        *lp = p;
    }
    return ((PhysPageDesc *)p) + (index & (L2_SIZE - 1));
}
22992e873b9Sbellard 
230108c49b8Sbellard static inline PhysPageDesc *phys_page_find(target_phys_addr_t index)
23192e873b9Sbellard {
232108c49b8Sbellard     return phys_page_find_alloc(index, 0);
23392e873b9Sbellard }
23492e873b9Sbellard 
2359fa3e853Sbellard #if !defined(CONFIG_USER_ONLY)
2363a7d929eSbellard static void tlb_protect_code(CPUState *env, ram_addr_t ram_addr,
2373a7d929eSbellard                              target_ulong vaddr);
2383a7d929eSbellard static void tlb_unprotect_code_phys(CPUState *env, ram_addr_t ram_addr,
2393a7d929eSbellard                                     target_ulong vaddr);
2409fa3e853Sbellard #endif
241fd6ce8f6Sbellard 
242b346ff46Sbellard void cpu_exec_init(void)
243fd6ce8f6Sbellard {
244fd6ce8f6Sbellard     if (!code_gen_ptr) {
245fd6ce8f6Sbellard         code_gen_ptr = code_gen_buffer;
246b346ff46Sbellard         page_init();
24733417e70Sbellard         io_mem_init();
248fd6ce8f6Sbellard     }
249fd6ce8f6Sbellard }
250fd6ce8f6Sbellard 
2519fa3e853Sbellard static inline void invalidate_page_bitmap(PageDesc *p)
2529fa3e853Sbellard {
2539fa3e853Sbellard     if (p->code_bitmap) {
25459817ccbSbellard         qemu_free(p->code_bitmap);
2559fa3e853Sbellard         p->code_bitmap = NULL;
2569fa3e853Sbellard     }
2579fa3e853Sbellard     p->code_write_count = 0;
2589fa3e853Sbellard }
2599fa3e853Sbellard 
260fd6ce8f6Sbellard /* set to NULL all the 'first_tb' fields in all PageDescs */
261fd6ce8f6Sbellard static void page_flush_tb(void)
262fd6ce8f6Sbellard {
263fd6ce8f6Sbellard     int i, j;
264fd6ce8f6Sbellard     PageDesc *p;
265fd6ce8f6Sbellard 
266fd6ce8f6Sbellard     for(i = 0; i < L1_SIZE; i++) {
267fd6ce8f6Sbellard         p = l1_map[i];
268fd6ce8f6Sbellard         if (p) {
2699fa3e853Sbellard             for(j = 0; j < L2_SIZE; j++) {
2709fa3e853Sbellard                 p->first_tb = NULL;
2719fa3e853Sbellard                 invalidate_page_bitmap(p);
2729fa3e853Sbellard                 p++;
2739fa3e853Sbellard             }
274fd6ce8f6Sbellard         }
275fd6ce8f6Sbellard     }
276fd6ce8f6Sbellard }
277fd6ce8f6Sbellard 
278fd6ce8f6Sbellard /* flush all the translation blocks */
279d4e8164fSbellard /* XXX: tb_flush is currently not thread safe */
2800124311eSbellard void tb_flush(CPUState *env)
281fd6ce8f6Sbellard {
2820124311eSbellard #if defined(DEBUG_FLUSH)
283fd6ce8f6Sbellard     printf("qemu: flush code_size=%d nb_tbs=%d avg_tb_size=%d\n",
284fd6ce8f6Sbellard            code_gen_ptr - code_gen_buffer,
285fd6ce8f6Sbellard            nb_tbs,
2860124311eSbellard            nb_tbs > 0 ? (code_gen_ptr - code_gen_buffer) / nb_tbs : 0);
287fd6ce8f6Sbellard #endif
288fd6ce8f6Sbellard     nb_tbs = 0;
2898a40a180Sbellard     memset (env->tb_jmp_cache, 0, TB_JMP_CACHE_SIZE * sizeof (void *));
2909fa3e853Sbellard 
2918a8a608fSbellard     memset (tb_phys_hash, 0, CODE_GEN_PHYS_HASH_SIZE * sizeof (void *));
292fd6ce8f6Sbellard     page_flush_tb();
2939fa3e853Sbellard 
294fd6ce8f6Sbellard     code_gen_ptr = code_gen_buffer;
295d4e8164fSbellard     /* XXX: flush processor icache at this point if cache flush is
296d4e8164fSbellard        expensive */
297e3db7226Sbellard     tb_flush_count++;
298fd6ce8f6Sbellard }
299fd6ce8f6Sbellard 
300fd6ce8f6Sbellard #ifdef DEBUG_TB_CHECK
301fd6ce8f6Sbellard 
/* DEBUG_TB_CHECK only: verify that no registered TB still intersects
   the page containing 'address' after an invalidation.
   NOTE(review): tb_hash and CODE_GEN_HASH_SIZE are not declared in
   this file's visible scope (the live lookups use tb_phys_hash), so
   this debug code may be stale -- verify it compiles when enabled. */
static void tb_invalidate_check(unsigned long address)
{
    TranslationBlock *tb;
    int i;
    address &= TARGET_PAGE_MASK;
    for(i = 0;i < CODE_GEN_HASH_SIZE; i++) {
        for(tb = tb_hash[i]; tb != NULL; tb = tb->hash_next) {
            if (!(address + TARGET_PAGE_SIZE <= tb->pc ||
                  address >= tb->pc + tb->size)) {
                printf("ERROR invalidate: address=%08lx PC=%08lx size=%04x\n",
                       address, tb->pc, tb->size);
            }
        }
    }
}
317fd6ce8f6Sbellard 
/* verify that all the pages have correct rights for code */
/* DEBUG_TB_CHECK only: a page holding translated code must not be
   writable, otherwise self-modifying code would go undetected.
   NOTE(review): relies on tb_hash / CODE_GEN_HASH_SIZE, which are not
   declared in this file's visible scope -- possibly stale. */
static void tb_page_check(void)
{
    TranslationBlock *tb;
    int i, flags1, flags2;

    for(i = 0;i < CODE_GEN_HASH_SIZE; i++) {
        for(tb = tb_hash[i]; tb != NULL; tb = tb->hash_next) {
            /* check both the first and the last page spanned by the TB */
            flags1 = page_get_flags(tb->pc);
            flags2 = page_get_flags(tb->pc + tb->size - 1);
            if ((flags1 & PAGE_WRITE) || (flags2 & PAGE_WRITE)) {
                printf("ERROR page flags: PC=%08lx size=%04x f1=%x f2=%x\n",
                       tb->pc, tb->size, flags1, flags2);
            }
        }
    }
}
335fd6ce8f6Sbellard 
/* DEBUG_TB_CHECK only: walk the circular jump list of 'tb' and report
   an error if it does not terminate back at 'tb'.  List pointers carry
   a 2-bit tag in their low bits; tag value 2 marks the list head. */
void tb_jmp_check(TranslationBlock *tb)
{
    TranslationBlock *tb1;
    unsigned int n1;

    /* suppress any remaining jumps to this TB */
    tb1 = tb->jmp_first;
    for(;;) {
        n1 = (long)tb1 & 3;
        tb1 = (TranslationBlock *)((long)tb1 & ~3);
        if (n1 == 2)
            break;      /* reached the (tagged) end of the circular list */
        tb1 = tb1->jmp_next[n1];
    }
    /* check end of list */
    if (tb1 != tb) {
        printf("ERROR: jmp_list from 0x%08lx\n", (long)tb);
    }
}
355d4e8164fSbellard 
356fd6ce8f6Sbellard #endif
357fd6ce8f6Sbellard 
358fd6ce8f6Sbellard /* invalidate one TB */
359fd6ce8f6Sbellard static inline void tb_remove(TranslationBlock **ptb, TranslationBlock *tb,
360fd6ce8f6Sbellard                              int next_offset)
361fd6ce8f6Sbellard {
362fd6ce8f6Sbellard     TranslationBlock *tb1;
363fd6ce8f6Sbellard     for(;;) {
364fd6ce8f6Sbellard         tb1 = *ptb;
365fd6ce8f6Sbellard         if (tb1 == tb) {
366fd6ce8f6Sbellard             *ptb = *(TranslationBlock **)((char *)tb1 + next_offset);
367fd6ce8f6Sbellard             break;
368fd6ce8f6Sbellard         }
369fd6ce8f6Sbellard         ptb = (TranslationBlock **)((char *)tb1 + next_offset);
370fd6ce8f6Sbellard     }
371fd6ce8f6Sbellard }
372fd6ce8f6Sbellard 
3739fa3e853Sbellard static inline void tb_page_remove(TranslationBlock **ptb, TranslationBlock *tb)
3749fa3e853Sbellard {
3759fa3e853Sbellard     TranslationBlock *tb1;
3769fa3e853Sbellard     unsigned int n1;
3779fa3e853Sbellard 
3789fa3e853Sbellard     for(;;) {
3799fa3e853Sbellard         tb1 = *ptb;
3809fa3e853Sbellard         n1 = (long)tb1 & 3;
3819fa3e853Sbellard         tb1 = (TranslationBlock *)((long)tb1 & ~3);
3829fa3e853Sbellard         if (tb1 == tb) {
3839fa3e853Sbellard             *ptb = tb1->page_next[n1];
3849fa3e853Sbellard             break;
3859fa3e853Sbellard         }
3869fa3e853Sbellard         ptb = &tb1->page_next[n1];
3879fa3e853Sbellard     }
3889fa3e853Sbellard }
3899fa3e853Sbellard 
/* Remove the outgoing jump 'n' of 'tb' from the circular list of
   jumps into its target TB.  Pointers in the list are tagged in their
   two low bits with the jump slot; tag value 2 marks the target TB's
   own jmp_first entry. */
static inline void tb_jmp_remove(TranslationBlock *tb, int n)
{
    TranslationBlock *tb1, **ptb;
    unsigned int n1;

    ptb = &tb->jmp_next[n];
    tb1 = *ptb;
    if (tb1) {
        /* find tb(n) in circular list */
        for(;;) {
            tb1 = *ptb;
            n1 = (long)tb1 & 3;
            tb1 = (TranslationBlock *)((long)tb1 & ~3);
            if (n1 == n && tb1 == tb)
                break;
            if (n1 == 2) {
                /* reached the target TB: continue through jmp_first */
                ptb = &tb1->jmp_first;
            } else {
                ptb = &tb1->jmp_next[n1];
            }
        }
        /* now we can suppress tb(n) from the list */
        *ptb = tb->jmp_next[n];

        tb->jmp_next[n] = NULL;
    }
}
417d4e8164fSbellard 
418d4e8164fSbellard /* reset the jump entry 'n' of a TB so that it is not chained to
419d4e8164fSbellard    another TB */
420d4e8164fSbellard static inline void tb_reset_jump(TranslationBlock *tb, int n)
421d4e8164fSbellard {
422d4e8164fSbellard     tb_set_jmp_target(tb, n, (unsigned long)(tb->tc_ptr + tb->tb_next_offset[n]));
423d4e8164fSbellard }
424d4e8164fSbellard 
/* Remove 'tb' from every structure that can reach it: the physical
   hash table, the per-page TB lists, the per-CPU tb_jmp_cache and the
   jump chains of other TBs.  Page-list entries whose page equals
   'page_addr' are skipped (presumably the caller is iterating that
   page's list itself -- pass -1 to unlink from all pages). */
static inline void tb_phys_invalidate(TranslationBlock *tb, unsigned int page_addr)
{
    PageDesc *p;
    unsigned int h, n1;
    target_ulong phys_pc;
    TranslationBlock *tb1, *tb2;

    /* remove the TB from the hash list */
    phys_pc = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
    h = tb_phys_hash_func(phys_pc);
    tb_remove(&tb_phys_hash[h], tb,
              offsetof(TranslationBlock, phys_hash_next));

    /* remove the TB from the page list */
    if (tb->page_addr[0] != page_addr) {
        p = page_find(tb->page_addr[0] >> TARGET_PAGE_BITS);
        tb_page_remove(&p->first_tb, tb);
        invalidate_page_bitmap(p);
    }
    /* a TB may span two pages; page_addr[1] == -1 means it does not */
    if (tb->page_addr[1] != -1 && tb->page_addr[1] != page_addr) {
        p = page_find(tb->page_addr[1] >> TARGET_PAGE_BITS);
        tb_page_remove(&p->first_tb, tb);
        invalidate_page_bitmap(p);
    }

    tb_invalidated_flag = 1;

    /* remove the TB from the hash list */
    h = tb_jmp_cache_hash_func(tb->pc);
    cpu_single_env->tb_jmp_cache[h] = NULL;

    /* suppress this TB from the two jump lists */
    tb_jmp_remove(tb, 0);
    tb_jmp_remove(tb, 1);

    /* suppress any remaining jumps to this TB */
    tb1 = tb->jmp_first;
    for(;;) {
        n1 = (long)tb1 & 3;
        if (n1 == 2)
            break;      /* tag 2 marks the end of the circular list */
        tb1 = (TranslationBlock *)((long)tb1 & ~3);
        tb2 = tb1->jmp_next[n1];
        /* make tb1 jump back into its own code instead of into us */
        tb_reset_jump(tb1, n1);
        tb1->jmp_next[n1] = NULL;
        tb1 = tb2;
    }
    tb->jmp_first = (TranslationBlock *)((long)tb | 2); /* fail safe */

    tb_phys_invalidate_count++;
}
4769fa3e853Sbellard 
/* Set bits [start, start + len) in the bitmap 'tab'.  Bit 0 of each
   byte is the least significant bit.  A zero-length range is a no-op. */
static inline void set_bits(uint8_t *tab, int start, int len)
{
    int end = start + len;
    uint8_t *p = tab + (start >> 3);
    int mask = 0xff << (start & 7);

    if ((start & ~7) == (end & ~7)) {
        /* the range begins and ends inside the same byte */
        if (start < end) {
            mask &= ~(0xff << (end & 7));
            *p |= mask;
        }
    } else {
        /* partial head byte */
        *p++ |= mask;
        start = (start + 8) & ~7;
        /* whole bytes in the middle */
        while (start < (end & ~7)) {
            *p++ = 0xff;
            start += 8;
        }
        /* partial tail byte, if any */
        if (start < end)
            *p |= ~(0xff << (end & 7));
    }
}
5039fa3e853Sbellard 
5049fa3e853Sbellard static void build_page_bitmap(PageDesc *p)
5059fa3e853Sbellard {
5069fa3e853Sbellard     int n, tb_start, tb_end;
5079fa3e853Sbellard     TranslationBlock *tb;
5089fa3e853Sbellard 
50959817ccbSbellard     p->code_bitmap = qemu_malloc(TARGET_PAGE_SIZE / 8);
5109fa3e853Sbellard     if (!p->code_bitmap)
5119fa3e853Sbellard         return;
5129fa3e853Sbellard     memset(p->code_bitmap, 0, TARGET_PAGE_SIZE / 8);
5139fa3e853Sbellard 
5149fa3e853Sbellard     tb = p->first_tb;
5159fa3e853Sbellard     while (tb != NULL) {
5169fa3e853Sbellard         n = (long)tb & 3;
5179fa3e853Sbellard         tb = (TranslationBlock *)((long)tb & ~3);
5189fa3e853Sbellard         /* NOTE: this is subtle as a TB may span two physical pages */
5199fa3e853Sbellard         if (n == 0) {
5209fa3e853Sbellard             /* NOTE: tb_end may be after the end of the page, but
5219fa3e853Sbellard                it is not a problem */
5229fa3e853Sbellard             tb_start = tb->pc & ~TARGET_PAGE_MASK;
5239fa3e853Sbellard             tb_end = tb_start + tb->size;
5249fa3e853Sbellard             if (tb_end > TARGET_PAGE_SIZE)
5259fa3e853Sbellard                 tb_end = TARGET_PAGE_SIZE;
5269fa3e853Sbellard         } else {
5279fa3e853Sbellard             tb_start = 0;
5289fa3e853Sbellard             tb_end = ((tb->pc + tb->size) & ~TARGET_PAGE_MASK);
5299fa3e853Sbellard         }
5309fa3e853Sbellard         set_bits(p->code_bitmap, tb_start, tb_end - tb_start);
5319fa3e853Sbellard         tb = tb->page_next[n];
5329fa3e853Sbellard     }
5339fa3e853Sbellard }
5349fa3e853Sbellard 
#ifdef TARGET_HAS_PRECISE_SMC

/* Translate a new TB for (pc, cs_base, flags, cflags) and link it to
   its physical page(s).  If the TB pool is exhausted, everything is
   flushed first, after which allocation cannot fail. */
static void tb_gen_code(CPUState *env,
                        target_ulong pc, target_ulong cs_base, int flags,
                        int cflags)
{
    TranslationBlock *tb;
    uint8_t *tc_ptr;
    target_ulong phys_pc, phys_page2, virt_page2;
    int code_gen_size;

    phys_pc = get_phys_addr_code(env, pc);
    tb = tb_alloc(pc);
    if (!tb) {
        /* flush must be done */
        tb_flush(env);
        /* cannot fail at this point */
        tb = tb_alloc(pc);
    }
    tc_ptr = code_gen_ptr;
    tb->tc_ptr = tc_ptr;
    tb->cs_base = cs_base;
    tb->flags = flags;
    tb->cflags = cflags;
    cpu_gen_code(env, tb, CODE_GEN_MAX_SIZE, &code_gen_size);
    /* advance the generation pointer, keeping CODE_GEN_ALIGN alignment */
    code_gen_ptr = (void *)(((unsigned long)code_gen_ptr + code_gen_size + CODE_GEN_ALIGN - 1) & ~(CODE_GEN_ALIGN - 1));

    /* check next page if needed */
    virt_page2 = (pc + tb->size - 1) & TARGET_PAGE_MASK;
    phys_page2 = -1;
    if ((pc & TARGET_PAGE_MASK) != virt_page2) {
        /* the TB spills into a second page: record that page too */
        phys_page2 = get_phys_addr_code(env, virt_page2);
    }
    tb_link_phys(tb, phys_pc, phys_page2);
}
#endif
571d720b93dSbellard 
/* invalidate all TBs which intersect with the target physical page
   starting in range [start;end[. NOTE: start and end must refer to
   the same physical page. 'is_cpu_write_access' should be true if called
   from a real cpu write access: the virtual CPU will exit the current
   TB if code is modified inside this TB. */
void tb_invalidate_phys_page_range(target_ulong start, target_ulong end,
                                   int is_cpu_write_access)
{
    int n, current_tb_modified, current_tb_not_found, current_flags;
    CPUState *env = cpu_single_env;
    PageDesc *p;
    TranslationBlock *tb, *tb_next, *current_tb, *saved_tb;
    target_ulong tb_start, tb_end;
    target_ulong current_pc, current_cs_base;

    p = page_find(start >> TARGET_PAGE_BITS);
    if (!p)
        return;
    /* after enough write faults on a real CPU access, build the SMC
       bitmap so future writes can be filtered cheaply */
    if (!p->code_bitmap &&
        ++p->code_write_count >= SMC_BITMAP_USE_THRESHOLD &&
        is_cpu_write_access) {
        /* build code bitmap */
        build_page_bitmap(p);
    }

    /* we remove all the TBs in the range [start, end[ */
    /* XXX: see if in some cases it could be faster to invalidate all the code */
    current_tb_not_found = is_cpu_write_access;
    current_tb_modified = 0;
    current_tb = NULL; /* avoid warning */
    current_pc = 0; /* avoid warning */
    current_cs_base = 0; /* avoid warning */
    current_flags = 0; /* avoid warning */
    tb = p->first_tb;
    while (tb != NULL) {
        /* list pointers carry the page slot (0/1) in their low bits */
        n = (long)tb & 3;
        tb = (TranslationBlock *)((long)tb & ~3);
        tb_next = tb->page_next[n];
        /* NOTE: this is subtle as a TB may span two physical pages */
        if (n == 0) {
            /* NOTE: tb_end may be after the end of the page, but
               it is not a problem */
            tb_start = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
            tb_end = tb_start + tb->size;
        } else {
            tb_start = tb->page_addr[1];
            tb_end = tb_start + ((tb->pc + tb->size) & ~TARGET_PAGE_MASK);
        }
        if (!(tb_end <= start || tb_start >= end)) {
#ifdef TARGET_HAS_PRECISE_SMC
            if (current_tb_not_found) {
                current_tb_not_found = 0;
                current_tb = NULL;
                if (env->mem_write_pc) {
                    /* now we have a real cpu fault */
                    current_tb = tb_find_pc(env->mem_write_pc);
                }
            }
            if (current_tb == tb &&
                !(current_tb->cflags & CF_SINGLE_INSN)) {
                /* If we are modifying the current TB, we must stop
                its execution. We could be more precise by checking
                that the modification is after the current PC, but it
                would require a specialized function to partially
                restore the CPU state */

                current_tb_modified = 1;
                /* recover the precise guest CPU state at the faulting
                   store so the TB can be regenerated from there */
                cpu_restore_state(current_tb, env,
                                  env->mem_write_pc, NULL);
#if defined(TARGET_I386)
                current_flags = env->hflags;
                current_flags |= (env->eflags & (IOPL_MASK | TF_MASK | VM_MASK));
                current_cs_base = (target_ulong)env->segs[R_CS].base;
                current_pc = current_cs_base + env->eip;
#else
#error unsupported CPU
#endif
            }
#endif /* TARGET_HAS_PRECISE_SMC */
            /* clear current_tb around the invalidate so a nested
               cpu_interrupt sees a consistent state, then restore it */
            saved_tb = env->current_tb;
            env->current_tb = NULL;
            tb_phys_invalidate(tb, -1);
            env->current_tb = saved_tb;
            if (env->interrupt_request && env->current_tb)
                cpu_interrupt(env, env->interrupt_request);
        }
        tb = tb_next;
    }
#if !defined(CONFIG_USER_ONLY)
    /* if no code remaining, no need to continue to use slow writes */
    if (!p->first_tb) {
        invalidate_page_bitmap(p);
        if (is_cpu_write_access) {
            tlb_unprotect_code_phys(env, start, env->mem_write_vaddr);
        }
    }
#endif
#ifdef TARGET_HAS_PRECISE_SMC
    if (current_tb_modified) {
        /* we generate a block containing just the instruction
           modifying the memory. It will ensure that it cannot modify
           itself */
        env->current_tb = NULL;
        tb_gen_code(env, current_pc, current_cs_base, current_flags,
                    CF_SINGLE_INSN);
        cpu_resume_from_signal(env, NULL);
    }
#endif
}
6819fa3e853Sbellard 
6829fa3e853Sbellard /* len must be <= 8 and start must be a multiple of len */
683d720b93dSbellard static inline void tb_invalidate_phys_page_fast(target_ulong start, int len)
6849fa3e853Sbellard {
6859fa3e853Sbellard     PageDesc *p;
6869fa3e853Sbellard     int offset, b;
68759817ccbSbellard #if 0
688a4193c8aSbellard     if (1) {
689a4193c8aSbellard         if (loglevel) {
690a4193c8aSbellard             fprintf(logfile, "modifying code at 0x%x size=%d EIP=%x PC=%08x\n",
691a4193c8aSbellard                    cpu_single_env->mem_write_vaddr, len,
692a4193c8aSbellard                    cpu_single_env->eip,
693a4193c8aSbellard                    cpu_single_env->eip + (long)cpu_single_env->segs[R_CS].base);
694a4193c8aSbellard         }
69559817ccbSbellard     }
69659817ccbSbellard #endif
6979fa3e853Sbellard     p = page_find(start >> TARGET_PAGE_BITS);
6989fa3e853Sbellard     if (!p)
6999fa3e853Sbellard         return;
7009fa3e853Sbellard     if (p->code_bitmap) {
7019fa3e853Sbellard         offset = start & ~TARGET_PAGE_MASK;
7029fa3e853Sbellard         b = p->code_bitmap[offset >> 3] >> (offset & 7);
7039fa3e853Sbellard         if (b & ((1 << len) - 1))
7049fa3e853Sbellard             goto do_invalidate;
7059fa3e853Sbellard     } else {
7069fa3e853Sbellard     do_invalidate:
707d720b93dSbellard         tb_invalidate_phys_page_range(start, start + len, 1);
7089fa3e853Sbellard     }
7099fa3e853Sbellard }
7109fa3e853Sbellard 
7119fa3e853Sbellard #if !defined(CONFIG_SOFTMMU)
/* Invalidate every TB linked on the (target) page containing 'addr'.
   Used in the no-softmmu build only.  'pc' is the host PC of the faulting
   write (0 if unknown) and 'puc' an opaque signal context; both are
   forwarded to cpu_restore_state()/cpu_resume_from_signal() so execution
   can be resumed precisely if the currently-executing TB is invalidated
   (self-modifying code). */
static void tb_invalidate_phys_page(target_ulong addr,
                                    unsigned long pc, void *puc)
{
    int n, current_flags, current_tb_modified;
    target_ulong current_pc, current_cs_base;
    PageDesc *p;
    TranslationBlock *tb, *current_tb;
#ifdef TARGET_HAS_PRECISE_SMC
    CPUState *env = cpu_single_env;
#endif

    addr &= TARGET_PAGE_MASK;
    p = page_find(addr >> TARGET_PAGE_BITS);
    if (!p)
        return;
    tb = p->first_tb;
    current_tb_modified = 0;
    current_tb = NULL;
    current_pc = 0; /* avoid warning */
    current_cs_base = 0; /* avoid warning */
    current_flags = 0; /* avoid warning */
#ifdef TARGET_HAS_PRECISE_SMC
    if (tb && pc != 0) {
        /* locate the TB that contains the faulting host pc, if any */
        current_tb = tb_find_pc(pc);
    }
#endif
    while (tb != NULL) {
        /* the low 2 bits of the list pointer encode which of the TB's
           two page slots (page_next[0]/[1]) chains to this page */
        n = (long)tb & 3;
        tb = (TranslationBlock *)((long)tb & ~3);
#ifdef TARGET_HAS_PRECISE_SMC
        if (current_tb == tb &&
            !(current_tb->cflags & CF_SINGLE_INSN)) {
                /* If we are modifying the current TB, we must stop
                   its execution. We could be more precise by checking
                   that the modification is after the current PC, but it
                   would require a specialized function to partially
                   restore the CPU state */

            current_tb_modified = 1;
            /* rebuild the precise CPU state at the faulting insn */
            cpu_restore_state(current_tb, env, pc, puc);
#if defined(TARGET_I386)
            current_flags = env->hflags;
            current_flags |= (env->eflags & (IOPL_MASK | TF_MASK | VM_MASK));
            current_cs_base = (target_ulong)env->segs[R_CS].base;
            current_pc = current_cs_base + env->eip;
#else
#error unsupported CPU
#endif
        }
#endif /* TARGET_HAS_PRECISE_SMC */
        tb_phys_invalidate(tb, addr);
        tb = tb->page_next[n];
    }
    p->first_tb = NULL;
#ifdef TARGET_HAS_PRECISE_SMC
    if (current_tb_modified) {
        /* we generate a block containing just the instruction
           modifying the memory. It will ensure that it cannot modify
           itself */
        env->current_tb = NULL;
        tb_gen_code(env, current_pc, current_cs_base, current_flags,
                    CF_SINGLE_INSN);
        /* does not return: restarts execution at the regenerated TB */
        cpu_resume_from_signal(env, puc);
    }
#endif
}
7789fa3e853Sbellard #endif
779fd6ce8f6Sbellard 
/* add the tb in the target page and protect it if necessary.
   'n' selects which of the TB's two page slots (0 or 1) is being linked;
   'page_addr' is the physical page address.  New code on a page must be
   write-protected so that self-modifying writes can be detected. */
static inline void tb_alloc_page(TranslationBlock *tb,
                                 unsigned int n, unsigned int page_addr)
{
    PageDesc *p;
    TranslationBlock *last_first_tb;

    tb->page_addr[n] = page_addr;
    p = page_find_alloc(page_addr >> TARGET_PAGE_BITS);
    /* push the TB on the page's list, tagging the pointer with the
       slot index n in its low bits */
    tb->page_next[n] = p->first_tb;
    last_first_tb = p->first_tb;
    p->first_tb = (TranslationBlock *)((long)tb | n);
    /* the cached code bitmap is stale now that a TB was added */
    invalidate_page_bitmap(p);

#if defined(TARGET_HAS_SMC) || 1

#if defined(CONFIG_USER_ONLY)
    if (p->flags & PAGE_WRITE) {
        unsigned long host_start, host_end, addr;
        int prot;

        /* force the host page as non writable (writes will have a
           page fault + mprotect overhead) */
        host_start = page_addr & qemu_host_page_mask;
        host_end = host_start + qemu_host_page_size;
        prot = 0;
        /* a host page may span several target pages: merge their flags */
        for(addr = host_start; addr < host_end; addr += TARGET_PAGE_SIZE)
            prot |= page_get_flags(addr);
        mprotect((void *)host_start, qemu_host_page_size,
                 (prot & PAGE_BITS) & ~PAGE_WRITE);
#ifdef DEBUG_TB_INVALIDATE
        printf("protecting code page: 0x%08lx\n",
               host_start);
#endif
        p->flags &= ~PAGE_WRITE;
    }
#else
    /* if some code is already present, then the pages are already
       protected. So we handle the case where only the first TB is
       allocated in a physical page */
    if (!last_first_tb) {
        target_ulong virt_addr;

        virt_addr = (tb->pc & TARGET_PAGE_MASK) + (n << TARGET_PAGE_BITS);
        tlb_protect_code(cpu_single_env, page_addr, virt_addr);
    }
#endif

#endif /* TARGET_HAS_SMC */
}
830fd6ce8f6Sbellard 
831fd6ce8f6Sbellard /* Allocate a new translation block. Flush the translation buffer if
832fd6ce8f6Sbellard    too many translation blocks or too much generated code. */
833c27004ecSbellard TranslationBlock *tb_alloc(target_ulong pc)
834fd6ce8f6Sbellard {
835fd6ce8f6Sbellard     TranslationBlock *tb;
836fd6ce8f6Sbellard 
837fd6ce8f6Sbellard     if (nb_tbs >= CODE_GEN_MAX_BLOCKS ||
838fd6ce8f6Sbellard         (code_gen_ptr - code_gen_buffer) >= CODE_GEN_BUFFER_MAX_SIZE)
839d4e8164fSbellard         return NULL;
840fd6ce8f6Sbellard     tb = &tbs[nb_tbs++];
841fd6ce8f6Sbellard     tb->pc = pc;
842b448f2f3Sbellard     tb->cflags = 0;
843d4e8164fSbellard     return tb;
844d4e8164fSbellard }
845d4e8164fSbellard 
/* add a new TB and link it to the physical page tables. phys_page2 is
   (-1) to indicate that only one page contains the TB. */
void tb_link_phys(TranslationBlock *tb,
                  target_ulong phys_pc, target_ulong phys_page2)
{
    unsigned int h;
    TranslationBlock **ptb;

    /* add in the physical hash table */
    h = tb_phys_hash_func(phys_pc);
    ptb = &tb_phys_hash[h];
    tb->phys_hash_next = *ptb;
    *ptb = tb;

    /* add in the page list (a TB can span at most two pages) */
    tb_alloc_page(tb, 0, phys_pc & TARGET_PAGE_MASK);
    if (phys_page2 != -1)
        tb_alloc_page(tb, 1, phys_page2);
    else
        tb->page_addr[1] = -1;

    /* tag value 2 marks this TB as the head of its incoming-jump list */
    tb->jmp_first = (TranslationBlock *)((long)tb | 2);
    tb->jmp_next[0] = NULL;
    tb->jmp_next[1] = NULL;
#ifdef USE_CODE_COPY
    tb->cflags &= ~CF_FP_USED;
    if (tb->cflags & CF_TB_FP_USED)
        tb->cflags |= CF_FP_USED;
#endif

    /* init original jump addresses (0xffff means the slot is unused) */
    if (tb->tb_next_offset[0] != 0xffff)
        tb_reset_jump(tb, 0);
    if (tb->tb_next_offset[1] != 0xffff)
        tb_reset_jump(tb, 1);

#ifdef DEBUG_TB_CHECK
    tb_page_check();
#endif
}
886fd6ce8f6Sbellard 
887a513fe19Sbellard /* find the TB 'tb' such that tb[0].tc_ptr <= tc_ptr <
888a513fe19Sbellard    tb[1].tc_ptr. Return NULL if not found */
889a513fe19Sbellard TranslationBlock *tb_find_pc(unsigned long tc_ptr)
890a513fe19Sbellard {
891a513fe19Sbellard     int m_min, m_max, m;
892a513fe19Sbellard     unsigned long v;
893a513fe19Sbellard     TranslationBlock *tb;
894a513fe19Sbellard 
895a513fe19Sbellard     if (nb_tbs <= 0)
896a513fe19Sbellard         return NULL;
897a513fe19Sbellard     if (tc_ptr < (unsigned long)code_gen_buffer ||
898a513fe19Sbellard         tc_ptr >= (unsigned long)code_gen_ptr)
899a513fe19Sbellard         return NULL;
900a513fe19Sbellard     /* binary search (cf Knuth) */
901a513fe19Sbellard     m_min = 0;
902a513fe19Sbellard     m_max = nb_tbs - 1;
903a513fe19Sbellard     while (m_min <= m_max) {
904a513fe19Sbellard         m = (m_min + m_max) >> 1;
905a513fe19Sbellard         tb = &tbs[m];
906a513fe19Sbellard         v = (unsigned long)tb->tc_ptr;
907a513fe19Sbellard         if (v == tc_ptr)
908a513fe19Sbellard             return tb;
909a513fe19Sbellard         else if (tc_ptr < v) {
910a513fe19Sbellard             m_max = m - 1;
911a513fe19Sbellard         } else {
912a513fe19Sbellard             m_min = m + 1;
913a513fe19Sbellard         }
914a513fe19Sbellard     }
915a513fe19Sbellard     return &tbs[m_max];
916a513fe19Sbellard }
9177501267eSbellard 
918ea041c0eSbellard static void tb_reset_jump_recursive(TranslationBlock *tb);
919ea041c0eSbellard 
/* Unchain the n-th outgoing jump of 'tb': remove tb from the incoming
   jump list of its destination TB, reset the jump in the generated code,
   then recursively unchain the destination's own jumps.  List pointers
   carry a 2-bit tag in their low bits: 0/1 = which jmp_next slot links
   here, 2 = list head (the destination TB itself). */
static inline void tb_reset_jump_recursive2(TranslationBlock *tb, int n)
{
    TranslationBlock *tb1, *tb_next, **ptb;
    unsigned int n1;

    tb1 = tb->jmp_next[n];
    if (tb1 != NULL) {
        /* find head of list */
        for(;;) {
            n1 = (long)tb1 & 3;
            tb1 = (TranslationBlock *)((long)tb1 & ~3);
            /* tag 2 marks the head, i.e. the destination TB */
            if (n1 == 2)
                break;
            tb1 = tb1->jmp_next[n1];
        }
        /* we are now sure now that tb jumps to tb1 */
        tb_next = tb1;

        /* remove tb from the jmp_first list */
        ptb = &tb_next->jmp_first;
        for(;;) {
            tb1 = *ptb;
            n1 = (long)tb1 & 3;
            tb1 = (TranslationBlock *)((long)tb1 & ~3);
            /* stop at the link that references (tb, slot n) */
            if (n1 == n && tb1 == tb)
                break;
            ptb = &tb1->jmp_next[n1];
        }
        *ptb = tb->jmp_next[n];
        tb->jmp_next[n] = NULL;

        /* suppress the jump to next tb in generated code */
        tb_reset_jump(tb, n);

        /* suppress jumps in the tb on which we could have jumped */
        tb_reset_jump_recursive(tb_next);
    }
}
958ea041c0eSbellard 
959ea041c0eSbellard static void tb_reset_jump_recursive(TranslationBlock *tb)
960ea041c0eSbellard {
961ea041c0eSbellard     tb_reset_jump_recursive2(tb, 0);
962ea041c0eSbellard     tb_reset_jump_recursive2(tb, 1);
963ea041c0eSbellard }
964ea041c0eSbellard 
9651fddef4bSbellard #if defined(TARGET_HAS_ICE)
966d720b93dSbellard static void breakpoint_invalidate(CPUState *env, target_ulong pc)
967d720b93dSbellard {
968d720b93dSbellard     target_ulong phys_addr;
969d720b93dSbellard 
970d720b93dSbellard     phys_addr = cpu_get_phys_page_debug(env, pc);
971d720b93dSbellard     tb_invalidate_phys_page_range(phys_addr, phys_addr + 1, 0);
972d720b93dSbellard }
973c27004ecSbellard #endif
974d720b93dSbellard 
975c33a346eSbellard /* add a breakpoint. EXCP_DEBUG is returned by the CPU loop if a
976c33a346eSbellard    breakpoint is reached */
9772e12669aSbellard int cpu_breakpoint_insert(CPUState *env, target_ulong pc)
9784c3a88a2Sbellard {
9791fddef4bSbellard #if defined(TARGET_HAS_ICE)
9804c3a88a2Sbellard     int i;
9814c3a88a2Sbellard 
9824c3a88a2Sbellard     for(i = 0; i < env->nb_breakpoints; i++) {
9834c3a88a2Sbellard         if (env->breakpoints[i] == pc)
9844c3a88a2Sbellard             return 0;
9854c3a88a2Sbellard     }
9864c3a88a2Sbellard 
9874c3a88a2Sbellard     if (env->nb_breakpoints >= MAX_BREAKPOINTS)
9884c3a88a2Sbellard         return -1;
9894c3a88a2Sbellard     env->breakpoints[env->nb_breakpoints++] = pc;
990d720b93dSbellard 
991d720b93dSbellard     breakpoint_invalidate(env, pc);
9924c3a88a2Sbellard     return 0;
9934c3a88a2Sbellard #else
9944c3a88a2Sbellard     return -1;
9954c3a88a2Sbellard #endif
9964c3a88a2Sbellard }
9974c3a88a2Sbellard 
9984c3a88a2Sbellard /* remove a breakpoint */
9992e12669aSbellard int cpu_breakpoint_remove(CPUState *env, target_ulong pc)
10004c3a88a2Sbellard {
10011fddef4bSbellard #if defined(TARGET_HAS_ICE)
10024c3a88a2Sbellard     int i;
10034c3a88a2Sbellard     for(i = 0; i < env->nb_breakpoints; i++) {
10044c3a88a2Sbellard         if (env->breakpoints[i] == pc)
10054c3a88a2Sbellard             goto found;
10064c3a88a2Sbellard     }
10074c3a88a2Sbellard     return -1;
10084c3a88a2Sbellard  found:
10094c3a88a2Sbellard     env->nb_breakpoints--;
10101fddef4bSbellard     if (i < env->nb_breakpoints)
10111fddef4bSbellard       env->breakpoints[i] = env->breakpoints[env->nb_breakpoints];
1012d720b93dSbellard 
1013d720b93dSbellard     breakpoint_invalidate(env, pc);
10144c3a88a2Sbellard     return 0;
10154c3a88a2Sbellard #else
10164c3a88a2Sbellard     return -1;
10174c3a88a2Sbellard #endif
10184c3a88a2Sbellard }
10194c3a88a2Sbellard 
1020c33a346eSbellard /* enable or disable single step mode. EXCP_DEBUG is returned by the
1021c33a346eSbellard    CPU loop after each instruction */
1022c33a346eSbellard void cpu_single_step(CPUState *env, int enabled)
1023c33a346eSbellard {
10241fddef4bSbellard #if defined(TARGET_HAS_ICE)
1025c33a346eSbellard     if (env->singlestep_enabled != enabled) {
1026c33a346eSbellard         env->singlestep_enabled = enabled;
1027c33a346eSbellard         /* must flush all the translated code to avoid inconsistancies */
10289fa3e853Sbellard         /* XXX: only flush what is necessary */
10290124311eSbellard         tb_flush(env);
1030c33a346eSbellard     }
1031c33a346eSbellard #endif
1032c33a346eSbellard }
1033c33a346eSbellard 
/* enable or disable low levels log.  Opens the log file lazily on the
   first non-zero 'log_flags'; exits the process if it cannot be
   opened. */
void cpu_set_log(int log_flags)
{
    loglevel = log_flags;
    if (loglevel && !logfile) {
        logfile = fopen(logfilename, "w");
        if (!logfile) {
            perror(logfilename);
            /* _exit, not exit: skip atexit handlers / stdio flushing */
            _exit(1);
        }
#if !defined(CONFIG_SOFTMMU)
        /* must avoid mmap() usage of glibc by setting a buffer "by hand" */
        {
            static uint8_t logfile_buf[4096];
            setvbuf(logfile, logfile_buf, _IOLBF, sizeof(logfile_buf));
        }
#else
        /* line-buffered so log output appears promptly */
        setvbuf(logfile, NULL, _IOLBF, 0);
#endif
    }
}
105534865134Sbellard 
/* Set the file name used by cpu_set_log(); keeps a private copy of
   'filename'.  NOTE(review): the previous logfilename is not freed and
   the strdup() result is not checked for NULL -- presumably acceptable
   since this is called once at startup; confirm against callers. */
void cpu_set_log_filename(const char *filename)
{
    logfilename = strdup(filename);
}
1060c33a346eSbellard 
/* mask must never be zero, except for A20 change call.
   Raises the interrupt request bits in 'mask' and, if the CPU is in the
   middle of executing translated code, unchains the current TB so the
   execution loop notices the request promptly. */
void cpu_interrupt(CPUState *env, int mask)
{
    TranslationBlock *tb;
    /* protects the TB jump-list surgery against reentrant calls
       (e.g. from a signal handler); if the lock is already taken the
       unchaining is simply skipped, only the request bits are set */
    static int interrupt_lock;

    env->interrupt_request |= mask;
    /* if the cpu is currently executing code, we must unlink it and
       all the potentially executing TB */
    tb = env->current_tb;
    if (tb && !testandset(&interrupt_lock)) {
        env->current_tb = NULL;
        tb_reset_jump_recursive(tb);
        interrupt_lock = 0;
    }
}
1077ea041c0eSbellard 
/* clear the interrupt request bits selected by 'mask' */
void cpu_reset_interrupt(CPUState *env, int mask)
{
    env->interrupt_request &= ~mask;
}
1082b54ad049Sbellard 
1083f193c797Sbellard CPULogItem cpu_log_items[] = {
1084f193c797Sbellard     { CPU_LOG_TB_OUT_ASM, "out_asm",
1085f193c797Sbellard       "show generated host assembly code for each compiled TB" },
1086f193c797Sbellard     { CPU_LOG_TB_IN_ASM, "in_asm",
1087f193c797Sbellard       "show target assembly code for each compiled TB" },
1088f193c797Sbellard     { CPU_LOG_TB_OP, "op",
1089f193c797Sbellard       "show micro ops for each compiled TB (only usable if 'in_asm' used)" },
1090f193c797Sbellard #ifdef TARGET_I386
1091f193c797Sbellard     { CPU_LOG_TB_OP_OPT, "op_opt",
1092f193c797Sbellard       "show micro ops after optimization for each compiled TB" },
1093f193c797Sbellard #endif
1094f193c797Sbellard     { CPU_LOG_INT, "int",
1095f193c797Sbellard       "show interrupts/exceptions in short format" },
1096f193c797Sbellard     { CPU_LOG_EXEC, "exec",
1097f193c797Sbellard       "show trace before each executed TB (lots of logs)" },
10989fddaa0cSbellard     { CPU_LOG_TB_CPU, "cpu",
10999fddaa0cSbellard       "show CPU state before bloc translation" },
1100f193c797Sbellard #ifdef TARGET_I386
1101f193c797Sbellard     { CPU_LOG_PCALL, "pcall",
1102f193c797Sbellard       "show protected mode far calls/returns/exceptions" },
1103f193c797Sbellard #endif
11048e3a9fd2Sbellard #ifdef DEBUG_IOPORT
1105fd872598Sbellard     { CPU_LOG_IOPORT, "ioport",
1106fd872598Sbellard       "show all i/o ports accesses" },
11078e3a9fd2Sbellard #endif
1108f193c797Sbellard     { 0, NULL, NULL },
1109f193c797Sbellard };
1110f193c797Sbellard 
/* Compare the n-character token starting at s1 with the NUL-terminated
   string s2.  Returns non-zero iff they match exactly (same length,
   same bytes). */
static int cmp1(const char *s1, int n, const char *s2)
{
    return strlen(s2) == (size_t)n && memcmp(s1, s2, n) == 0;
}
1117f193c797Sbellard 
1118f193c797Sbellard /* takes a comma separated list of log masks. Return 0 if error. */
1119f193c797Sbellard int cpu_str_to_log_mask(const char *str)
1120f193c797Sbellard {
1121f193c797Sbellard     CPULogItem *item;
1122f193c797Sbellard     int mask;
1123f193c797Sbellard     const char *p, *p1;
1124f193c797Sbellard 
1125f193c797Sbellard     p = str;
1126f193c797Sbellard     mask = 0;
1127f193c797Sbellard     for(;;) {
1128f193c797Sbellard         p1 = strchr(p, ',');
1129f193c797Sbellard         if (!p1)
1130f193c797Sbellard             p1 = p + strlen(p);
11318e3a9fd2Sbellard 	if(cmp1(p,p1-p,"all")) {
11328e3a9fd2Sbellard 		for(item = cpu_log_items; item->mask != 0; item++) {
11338e3a9fd2Sbellard 			mask |= item->mask;
11348e3a9fd2Sbellard 		}
11358e3a9fd2Sbellard 	} else {
1136f193c797Sbellard         for(item = cpu_log_items; item->mask != 0; item++) {
1137f193c797Sbellard             if (cmp1(p, p1 - p, item->name))
1138f193c797Sbellard                 goto found;
1139f193c797Sbellard         }
1140f193c797Sbellard         return 0;
11418e3a9fd2Sbellard 	}
1142f193c797Sbellard     found:
1143f193c797Sbellard         mask |= item->mask;
1144f193c797Sbellard         if (*p1 != ',')
1145f193c797Sbellard             break;
1146f193c797Sbellard         p = p1 + 1;
1147f193c797Sbellard     }
1148f193c797Sbellard     return mask;
1149f193c797Sbellard }
1150ea041c0eSbellard 
/* Report an unrecoverable emulation error: print the printf-style
   message and a dump of the CPU state to stderr, then abort().
   Never returns. */
void cpu_abort(CPUState *env, const char *fmt, ...)
{
    va_list ap;

    va_start(ap, fmt);
    fprintf(stderr, "qemu: fatal: ");
    vfprintf(stderr, fmt, ap);
    fprintf(stderr, "\n");
#ifdef TARGET_I386
    /* include FPU and condition-code details on x86 */
    cpu_dump_state(env, stderr, fprintf, X86_DUMP_FPU | X86_DUMP_CCOP);
#else
    cpu_dump_state(env, stderr, fprintf, 0);
#endif
    va_end(ap);
    abort();
}
11677501267eSbellard 
11680124311eSbellard #if !defined(CONFIG_USER_ONLY)
11690124311eSbellard 
/* NOTE: if flush_global is true, also flush global entries (not
   implemented yet).  Invalidates the whole software TLB and the
   TB jump cache of 'env'. */
void tlb_flush(CPUState *env, int flush_global)
{
    int i;

#if defined(DEBUG_TLB)
    printf("tlb_flush:\n");
#endif
    /* must reset current TB so that interrupts cannot modify the
       links while we are modifying them */
    env->current_tb = NULL;

    /* address -1 marks a TLB entry as invalid (see tlb_flush_entry) */
    for(i = 0; i < CPU_TLB_SIZE; i++) {
        env->tlb_read[0][i].address = -1;
        env->tlb_write[0][i].address = -1;
        env->tlb_read[1][i].address = -1;
        env->tlb_write[1][i].address = -1;
    }

    memset (env->tb_jmp_cache, 0, TB_JMP_CACHE_SIZE * sizeof (void *));

#if !defined(CONFIG_SOFTMMU)
    /* without softmmu, drop every host mapping of guest memory */
    munmap((void *)MMAP_AREA_START, MMAP_AREA_END - MMAP_AREA_START);
#endif
#ifdef USE_KQEMU
    if (env->kqemu_enabled) {
        kqemu_flush(env, flush_global);
    }
#endif
    tlb_flush_count++;
}
120233417e70Sbellard 
1203274da6b2Sbellard static inline void tlb_flush_entry(CPUTLBEntry *tlb_entry, target_ulong addr)
120461382a50Sbellard {
120561382a50Sbellard     if (addr == (tlb_entry->address &
120661382a50Sbellard                  (TARGET_PAGE_MASK | TLB_INVALID_MASK)))
120761382a50Sbellard         tlb_entry->address = -1;
120861382a50Sbellard }
120961382a50Sbellard 
/* Invalidate the TLB entries and cached TBs associated with the single
   virtual page containing 'addr'. */
void tlb_flush_page(CPUState *env, target_ulong addr)
{
    int i;
    TranslationBlock *tb;

#if defined(DEBUG_TLB)
    printf("tlb_flush_page: " TARGET_FMT_lx "\n", addr);
#endif
    /* must reset current TB so that interrupts cannot modify the
       links while we are modifying them */
    env->current_tb = NULL;

    /* flush the (direct-mapped) TLB slot for this page in all modes */
    addr &= TARGET_PAGE_MASK;
    i = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
    tlb_flush_entry(&env->tlb_read[0][i], addr);
    tlb_flush_entry(&env->tlb_write[0][i], addr);
    tlb_flush_entry(&env->tlb_read[1][i], addr);
    tlb_flush_entry(&env->tlb_write[1][i], addr);

    /* drop jump-cache entries for any TB starting or ending on the page
       (a TB can span two pages) */
    for(i = 0; i < TB_JMP_CACHE_SIZE; i++) {
        tb = env->tb_jmp_cache[i];
        if (tb &&
            ((tb->pc & TARGET_PAGE_MASK) == addr ||
             ((tb->pc + tb->size - 1) & TARGET_PAGE_MASK) == addr)) {
            env->tb_jmp_cache[i] = NULL;
        }
    }

#if !defined(CONFIG_SOFTMMU)
    if (addr < MMAP_AREA_END)
        munmap((void *)addr, TARGET_PAGE_SIZE);
#endif
#ifdef USE_KQEMU
    if (env->kqemu_enabled) {
        kqemu_flush_page(env, addr);
    }
#endif
}
12489fa3e853Sbellard 
12494f2ac237Sbellard static inline void tlb_protect_code1(CPUTLBEntry *tlb_entry, target_ulong addr)
12509fa3e853Sbellard {
12519fa3e853Sbellard     if (addr == (tlb_entry->address &
12529fa3e853Sbellard                  (TARGET_PAGE_MASK | TLB_INVALID_MASK)) &&
12533a7d929eSbellard         (tlb_entry->address & ~TARGET_PAGE_MASK) == IO_MEM_RAM) {
12543a7d929eSbellard         tlb_entry->address = (tlb_entry->address & TARGET_PAGE_MASK) | IO_MEM_NOTDIRTY;
12559fa3e853Sbellard     }
12569fa3e853Sbellard }
12579fa3e853Sbellard 
/* update the TLBs so that writes to code in the virtual page 'vaddr'
   can be detected.  'ram_addr' is the corresponding physical RAM
   offset whose code-dirty flag is cleared. */
static void tlb_protect_code(CPUState *env, ram_addr_t ram_addr,
                             target_ulong vaddr)
{
    int i;

    /* retarget the write TLB slots for this page to the notdirty handler */
    vaddr &= TARGET_PAGE_MASK;
    i = (vaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
    tlb_protect_code1(&env->tlb_write[0][i], vaddr);
    tlb_protect_code1(&env->tlb_write[1][i], vaddr);

#ifdef USE_KQEMU
    if (env->kqemu_enabled) {
        kqemu_set_notdirty(env, ram_addr);
    }
#endif
    /* mark the page as containing code: cleared CODE_DIRTY_FLAG means
       writes must be checked against translated code */
    phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] &= ~CODE_DIRTY_FLAG;

#if !defined(CONFIG_SOFTMMU)
    /* NOTE: as we generated the code for this page, it is already at
       least readable */
    if (vaddr < MMAP_AREA_END)
        mprotect((void *)vaddr, TARGET_PAGE_SIZE, PROT_READ);
#endif
}
12849fa3e853Sbellard 
/* update the TLB so that writes in physical page 'ram_addr' are no longer
   tested for self modifying code (sets CODE_DIRTY_FLAG back).
   NOTE: 'env' and 'vaddr' are currently unused; kept for interface
   symmetry with tlb_protect_code(). */
static void tlb_unprotect_code_phys(CPUState *env, ram_addr_t ram_addr,
                                    target_ulong vaddr)
{
    phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] |= CODE_DIRTY_FLAG;
}
12929fa3e853Sbellard 
/* If this RAM-backed TLB entry points into the host address range
   [start, start + length), switch it to the IO_MEM_NOTDIRTY handler so
   the next write re-sets the dirty bit. */
static inline void tlb_reset_dirty_range(CPUTLBEntry *tlb_entry,
                                         unsigned long start, unsigned long length)
{
    unsigned long addr;
    if ((tlb_entry->address & ~TARGET_PAGE_MASK) == IO_MEM_RAM) {
        /* host address of the page mapped by this entry */
        addr = (tlb_entry->address & TARGET_PAGE_MASK) + tlb_entry->addend;
        /* unsigned wrap-around makes this a single-compare range check:
           (addr - start) < length  <=>  start <= addr < start + length */
        if ((addr - start) < length) {
            tlb_entry->address = (tlb_entry->address & TARGET_PAGE_MASK) | IO_MEM_NOTDIRTY;
        }
    }
}
13041ccde1cbSbellard 
/* Clear the given dirty_flags bits for all RAM pages in [start, end) and
   arrange for them to be set again on the next guest write: the dirty
   bitmap bytes are masked, the current CPU's write TLB entries covering
   the range are downgraded to IO_MEM_NOTDIRTY, and (non-SOFTMMU only)
   host mappings are write-protected again.
   NOTE(review): only cpu_single_env's TLB is touched — assumes a single
   CPU context is active; confirm before reusing in an SMP path. */
void cpu_physical_memory_reset_dirty(ram_addr_t start, ram_addr_t end,
                                     int dirty_flags)
{
    CPUState *env;
    unsigned long length, start1;
    int i, mask, len;
    uint8_t *p;

    /* align the range to whole target pages */
    start &= TARGET_PAGE_MASK;
    end = TARGET_PAGE_ALIGN(end);

    length = end - start;
    if (length == 0)
        return;
    len = length >> TARGET_PAGE_BITS;
    env = cpu_single_env;
#ifdef USE_KQEMU
    /* keep the kqemu accelerator's notion of dirty pages in sync */
    if (env->kqemu_enabled) {
        ram_addr_t addr;
        addr = start;
        for(i = 0; i < len; i++) {
            kqemu_set_notdirty(env, addr);
            addr += TARGET_PAGE_SIZE;
        }
    }
#endif
    /* clear the requested flag bits in the per-page dirty byte array */
    mask = ~dirty_flags;
    p = phys_ram_dirty + (start >> TARGET_PAGE_BITS);
    for(i = 0; i < len; i++)
        p[i] &= mask;

    /* we modify the TLB cache so that the dirty bit will be set again
       when accessing the range */
    /* tlb_reset_dirty_range compares against host addresses, so
       translate the guest RAM offset into the host mapping */
    start1 = start + (unsigned long)phys_ram_base;
    for(i = 0; i < CPU_TLB_SIZE; i++)
        tlb_reset_dirty_range(&env->tlb_write[0][i], start1, length);
    for(i = 0; i < CPU_TLB_SIZE; i++)
        tlb_reset_dirty_range(&env->tlb_write[1][i], start1, length);

#if !defined(CONFIG_SOFTMMU)
    /* XXX: this is expensive */
    {
        /* NOTE: this inner 'p' intentionally shadows the outer uint8_t *p;
           here it walks the virtual page descriptors instead */
        VirtPageDesc *p;
        int j;
        target_ulong addr;

        /* remove write permission from every host mapping of a page in
           the range so the next write faults and marks it dirty again */
        for(i = 0; i < L1_SIZE; i++) {
            p = l1_virt_map[i];
            if (p) {
                addr = i << (TARGET_PAGE_BITS + L2_BITS);
                for(j = 0; j < L2_SIZE; j++) {
                    if (p->valid_tag == virt_valid_tag &&
                        p->phys_addr >= start && p->phys_addr < end &&
                        (p->prot & PROT_WRITE)) {
                        if (addr < MMAP_AREA_END) {
                            mprotect((void *)addr, TARGET_PAGE_SIZE,
                                     p->prot & ~PROT_WRITE);
                        }
                    }
                    addr += TARGET_PAGE_SIZE;
                    p++;
                }
            }
        }
    }
#endif
}
13721ccde1cbSbellard 
13733a7d929eSbellard static inline void tlb_update_dirty(CPUTLBEntry *tlb_entry)
13743a7d929eSbellard {
13753a7d929eSbellard     ram_addr_t ram_addr;
13763a7d929eSbellard 
13773a7d929eSbellard     if ((tlb_entry->address & ~TARGET_PAGE_MASK) == IO_MEM_RAM) {
13783a7d929eSbellard         ram_addr = (tlb_entry->address & TARGET_PAGE_MASK) +
13793a7d929eSbellard             tlb_entry->addend - (unsigned long)phys_ram_base;
13803a7d929eSbellard         if (!cpu_physical_memory_is_dirty(ram_addr)) {
13813a7d929eSbellard             tlb_entry->address |= IO_MEM_NOTDIRTY;
13823a7d929eSbellard         }
13833a7d929eSbellard     }
13843a7d929eSbellard }
13853a7d929eSbellard 
13863a7d929eSbellard /* update the TLB according to the current state of the dirty bits */
13873a7d929eSbellard void cpu_tlb_update_dirty(CPUState *env)
13883a7d929eSbellard {
13893a7d929eSbellard     int i;
13903a7d929eSbellard     for(i = 0; i < CPU_TLB_SIZE; i++)
13913a7d929eSbellard         tlb_update_dirty(&env->tlb_write[0][i]);
13923a7d929eSbellard     for(i = 0; i < CPU_TLB_SIZE; i++)
13933a7d929eSbellard         tlb_update_dirty(&env->tlb_write[1][i]);
13943a7d929eSbellard }
13953a7d929eSbellard 
13961ccde1cbSbellard static inline void tlb_set_dirty1(CPUTLBEntry *tlb_entry,
13971ccde1cbSbellard                                   unsigned long start)
13981ccde1cbSbellard {
13991ccde1cbSbellard     unsigned long addr;
14001ccde1cbSbellard     if ((tlb_entry->address & ~TARGET_PAGE_MASK) == IO_MEM_NOTDIRTY) {
14011ccde1cbSbellard         addr = (tlb_entry->address & TARGET_PAGE_MASK) + tlb_entry->addend;
14021ccde1cbSbellard         if (addr == start) {
14031ccde1cbSbellard             tlb_entry->address = (tlb_entry->address & TARGET_PAGE_MASK) | IO_MEM_RAM;
14041ccde1cbSbellard         }
14051ccde1cbSbellard     }
14061ccde1cbSbellard }
14071ccde1cbSbellard 
14081ccde1cbSbellard /* update the TLB corresponding to virtual page vaddr and phys addr
14091ccde1cbSbellard    addr so that it is no longer dirty */
14101ccde1cbSbellard static inline void tlb_set_dirty(unsigned long addr, target_ulong vaddr)
14111ccde1cbSbellard {
14121ccde1cbSbellard     CPUState *env = cpu_single_env;
14131ccde1cbSbellard     int i;
14141ccde1cbSbellard 
14151ccde1cbSbellard     addr &= TARGET_PAGE_MASK;
14161ccde1cbSbellard     i = (vaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
14171ccde1cbSbellard     tlb_set_dirty1(&env->tlb_write[0][i], addr);
14181ccde1cbSbellard     tlb_set_dirty1(&env->tlb_write[1][i], addr);
14191ccde1cbSbellard }
14201ccde1cbSbellard 
/* add a new TLB entry. At most one entry for a given virtual address
   is permitted. Return 0 if OK or 2 if the page could not be mapped
   (can only happen in non SOFTMMU mode for I/O pages or pages
   conflicting with the host address space). */
int tlb_set_page(CPUState *env, target_ulong vaddr,
                 target_phys_addr_t paddr, int prot,
                 int is_user, int is_softmmu)
{
    PhysPageDesc *p;
    unsigned long pd;
    unsigned int index;
    target_ulong address;
    target_phys_addr_t addend;
    int ret;

    /* look up what backs the physical page (RAM offset or IO id) */
    p = phys_page_find(paddr >> TARGET_PAGE_BITS);
    if (!p) {
        pd = IO_MEM_UNASSIGNED;
    } else {
        pd = p->phys_offset;
    }
#if defined(DEBUG_TLB)
    printf("tlb_set_page: vaddr=" TARGET_FMT_lx " paddr=0x%08x prot=%x u=%d smmu=%d pd=0x%08lx\n",
           vaddr, paddr, prot, is_user, is_softmmu, pd);
#endif

    ret = 0;
    /* NOTE: with CONFIG_SOFTMMU this brace block is unconditional; without
       it, the 'if' selects the softmmu path and the #else-like branch
       further down handles direct host mappings */
#if !defined(CONFIG_SOFTMMU)
    if (is_softmmu)
#endif
    {
        if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM) {
            /* IO memory case */
            /* the IO id is kept in the low bits of the TLB address */
            address = vaddr | pd;
            addend = paddr;
        } else {
            /* standard memory */
            address = vaddr;
            addend = (unsigned long)phys_ram_base + (pd & TARGET_PAGE_MASK);
        }

        index = (vaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
        /* store addend relative to vaddr so host = guest + addend */
        addend -= vaddr;
        if (prot & PAGE_READ) {
            env->tlb_read[is_user][index].address = address;
            env->tlb_read[is_user][index].addend = addend;
        } else {
            /* -1 never matches a page-aligned lookup: entry disabled */
            env->tlb_read[is_user][index].address = -1;
            env->tlb_read[is_user][index].addend = -1;
        }
        if (prot & PAGE_WRITE) {
            if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_ROM) {
                /* ROM: access is ignored (same as unassigned) */
                env->tlb_write[is_user][index].address = vaddr | IO_MEM_ROM;
                env->tlb_write[is_user][index].addend = addend;
            } else if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_RAM &&
                       !cpu_physical_memory_is_dirty(pd)) {
                /* clean RAM page: trap writes so the dirty bits get set */
                env->tlb_write[is_user][index].address = vaddr | IO_MEM_NOTDIRTY;
                env->tlb_write[is_user][index].addend = addend;
            } else {
                env->tlb_write[is_user][index].address = address;
                env->tlb_write[is_user][index].addend = addend;
            }
        } else {
            env->tlb_write[is_user][index].address = -1;
            env->tlb_write[is_user][index].addend = -1;
        }
    }
#if !defined(CONFIG_SOFTMMU)
    else {
        if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM) {
            /* IO access: no mapping is done as it will be handled by the
               soft MMU */
            if (!(env->hflags & HF_SOFTMMU_MASK))
                ret = 2;
        } else {
            void *map_addr;

            if (vaddr >= MMAP_AREA_END) {
                /* beyond the mmap window: cannot map directly */
                ret = 2;
            } else {
                if (prot & PROT_WRITE) {
                    if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_ROM ||
#if defined(TARGET_HAS_SMC) || 1
                        first_tb ||
#endif
                        ((pd & ~TARGET_PAGE_MASK) == IO_MEM_RAM &&
                         !cpu_physical_memory_is_dirty(pd))) {
                        /* ROM: we do as if code was inside */
                        /* if code is present, we only map as read only and save the
                           original mapping */
                        VirtPageDesc *vp;

                        vp = virt_page_find_alloc(vaddr >> TARGET_PAGE_BITS, 1);
                        vp->phys_addr = pd;
                        vp->prot = prot;
                        vp->valid_tag = virt_valid_tag;
                        prot &= ~PAGE_WRITE;
                    }
                }
                /* map guest RAM directly into the host address space */
                map_addr = mmap((void *)vaddr, TARGET_PAGE_SIZE, prot,
                                MAP_SHARED | MAP_FIXED, phys_ram_fd, (pd & TARGET_PAGE_MASK));
                if (map_addr == MAP_FAILED) {
                    cpu_abort(env, "mmap failed when mapped physical address 0x%08x to virtual address 0x%08x\n",
                              paddr, vaddr);
                }
            }
        }
    }
#endif
    return ret;
}
15339fa3e853Sbellard 
/* called from signal handler: invalidate the code and unprotect the
   page. Return TRUE if the fault was succesfully handled. */
int page_unprotect(unsigned long addr, unsigned long pc, void *puc)
{
#if !defined(CONFIG_SOFTMMU)
    VirtPageDesc *vp;

#if defined(DEBUG_TLB)
    printf("page_unprotect: addr=0x%08x\n", addr);
#endif
    addr &= TARGET_PAGE_MASK;

    /* if it is not mapped, no need to worry here */
    if (addr >= MMAP_AREA_END)
        return 0;
    vp = virt_page_find(addr >> TARGET_PAGE_BITS);
    if (!vp)
        return 0;
    /* NOTE: in this case, validate_tag is _not_ tested as it
       validates only the code TLB */
    if (vp->valid_tag != virt_valid_tag)
        return 0;
    /* the page was never writable in the first place: not our fault */
    if (!(vp->prot & PAGE_WRITE))
        return 0;
#if defined(DEBUG_TLB)
    printf("page_unprotect: addr=0x%08x phys_addr=0x%08x prot=%x\n",
           addr, vp->phys_addr, vp->prot);
#endif
    /* restore the saved (writable) protection */
    if (mprotect((void *)addr, TARGET_PAGE_SIZE, vp->prot) < 0)
        cpu_abort(cpu_single_env, "error mprotect addr=0x%lx prot=%d\n",
                  (unsigned long)addr, vp->prot);
    /* set the dirty bit */
    phys_ram_dirty[vp->phys_addr >> TARGET_PAGE_BITS] = 0xff;
    /* flush the code inside */
    tb_invalidate_phys_page(vp->phys_addr, pc, puc);
    return 1;
#else
    /* softmmu build: faults are never handled this way */
    return 0;
#endif
}
157433417e70Sbellard 
15750124311eSbellard #else
15760124311eSbellard 
void tlb_flush(CPUState *env, int flush_global)
{
    /* user-mode emulation keeps no TLB: nothing to flush */
}
15800124311eSbellard 
void tlb_flush_page(CPUState *env, target_ulong addr)
{
    /* user-mode emulation keeps no TLB: nothing to flush */
}
15840124311eSbellard 
int tlb_set_page(CPUState *env, target_ulong vaddr,
                 target_phys_addr_t paddr, int prot,
                 int is_user, int is_softmmu)
{
    /* user-mode emulation keeps no TLB: always report success */
    return 0;
}
159133417e70Sbellard 
/* dump memory mappings */
void page_dump(FILE *f)
{
    unsigned long start, end;
    int i, j, prot, prot1;
    PageDesc *p;

    fprintf(f, "%-8s %-8s %-8s %s\n",
            "start", "end", "size", "prot");
    /* start == -1 means "no open region"; regions with identical
       protection are coalesced into one output line */
    start = -1;
    end = -1;
    prot = 0;
    /* iterate one slot past L1_SIZE with p == NULL so the final open
       region is flushed by the prot1 != prot transition */
    for(i = 0; i <= L1_SIZE; i++) {
        if (i < L1_SIZE)
            p = l1_map[i];
        else
            p = NULL;
        for(j = 0;j < L2_SIZE; j++) {
            if (!p)
                prot1 = 0;
            else
                prot1 = p[j].flags;
            if (prot1 != prot) {
                /* protection changed: emit the region that just ended */
                end = (i << (32 - L1_BITS)) | (j << TARGET_PAGE_BITS);
                if (start != -1) {
                    fprintf(f, "%08lx-%08lx %08lx %c%c%c\n",
                            start, end, end - start,
                            prot & PAGE_READ ? 'r' : '-',
                            prot & PAGE_WRITE ? 'w' : '-',
                            prot & PAGE_EXEC ? 'x' : '-');
                }
                if (prot1 != 0)
                    start = end;
                else
                    start = -1;
                prot = prot1;
            }
            if (!p)
                break;
        }
    }
}
16349fa3e853Sbellard 
16359fa3e853Sbellard int page_get_flags(unsigned long address)
16369fa3e853Sbellard {
16379fa3e853Sbellard     PageDesc *p;
16389fa3e853Sbellard 
16399fa3e853Sbellard     p = page_find(address >> TARGET_PAGE_BITS);
16409fa3e853Sbellard     if (!p)
16419fa3e853Sbellard         return 0;
16429fa3e853Sbellard     return p->flags;
16439fa3e853Sbellard }
16449fa3e853Sbellard 
16459fa3e853Sbellard /* modify the flags of a page and invalidate the code if
16469fa3e853Sbellard    necessary. The flag PAGE_WRITE_ORG is positionned automatically
16479fa3e853Sbellard    depending on PAGE_WRITE */
16489fa3e853Sbellard void page_set_flags(unsigned long start, unsigned long end, int flags)
16499fa3e853Sbellard {
16509fa3e853Sbellard     PageDesc *p;
16519fa3e853Sbellard     unsigned long addr;
16529fa3e853Sbellard 
16539fa3e853Sbellard     start = start & TARGET_PAGE_MASK;
16549fa3e853Sbellard     end = TARGET_PAGE_ALIGN(end);
16559fa3e853Sbellard     if (flags & PAGE_WRITE)
16569fa3e853Sbellard         flags |= PAGE_WRITE_ORG;
16579fa3e853Sbellard     spin_lock(&tb_lock);
16589fa3e853Sbellard     for(addr = start; addr < end; addr += TARGET_PAGE_SIZE) {
16599fa3e853Sbellard         p = page_find_alloc(addr >> TARGET_PAGE_BITS);
16609fa3e853Sbellard         /* if the write protection is set, then we invalidate the code
16619fa3e853Sbellard            inside */
16629fa3e853Sbellard         if (!(p->flags & PAGE_WRITE) &&
16639fa3e853Sbellard             (flags & PAGE_WRITE) &&
16649fa3e853Sbellard             p->first_tb) {
1665d720b93dSbellard             tb_invalidate_phys_page(addr, 0, NULL);
16669fa3e853Sbellard         }
16679fa3e853Sbellard         p->flags = flags;
16689fa3e853Sbellard     }
16699fa3e853Sbellard     spin_unlock(&tb_lock);
16709fa3e853Sbellard }
16719fa3e853Sbellard 
/* called from signal handler: invalidate the code and unprotect the
   page. Return TRUE if the fault was succesfully handled. */
int page_unprotect(unsigned long address, unsigned long pc, void *puc)
{
    unsigned int page_index, prot, pindex;
    PageDesc *p, *p1;
    unsigned long host_start, host_end, addr;

    /* protection works at host page granularity, which may span several
       target pages */
    host_start = address & qemu_host_page_mask;
    page_index = host_start >> TARGET_PAGE_BITS;
    p1 = page_find(page_index);
    if (!p1)
        return 0;
    host_end = host_start + qemu_host_page_size;
    p = p1;
    prot = 0;
    /* union of the flags of every target page in the host page */
    for(addr = host_start;addr < host_end; addr += TARGET_PAGE_SIZE) {
        prot |= p->flags;
        p++;
    }
    /* if the page was really writable, then we change its
       protection back to writable */
    if (prot & PAGE_WRITE_ORG) {
        pindex = (address - host_start) >> TARGET_PAGE_BITS;
        if (!(p1[pindex].flags & PAGE_WRITE)) {
            mprotect((void *)host_start, qemu_host_page_size,
                     (prot & PAGE_BITS) | PAGE_WRITE);
            p1[pindex].flags |= PAGE_WRITE;
            /* and since the content will be modified, we must invalidate
               the corresponding translated code. */
            tb_invalidate_phys_page(address, pc, puc);
#ifdef DEBUG_TB_CHECK
            tb_invalidate_check(address);
#endif
            return 1;
        }
    }
    /* fault not caused by our write-protection: let the caller handle it */
    return 0;
}
17119fa3e853Sbellard 
17129fa3e853Sbellard /* call this function when system calls directly modify a memory area */
17139fa3e853Sbellard void page_unprotect_range(uint8_t *data, unsigned long data_size)
17149fa3e853Sbellard {
17159fa3e853Sbellard     unsigned long start, end, addr;
17169fa3e853Sbellard 
17179fa3e853Sbellard     start = (unsigned long)data;
17189fa3e853Sbellard     end = start + data_size;
17199fa3e853Sbellard     start &= TARGET_PAGE_MASK;
17209fa3e853Sbellard     end = TARGET_PAGE_ALIGN(end);
17219fa3e853Sbellard     for(addr = start; addr < end; addr += TARGET_PAGE_SIZE) {
1722d720b93dSbellard         page_unprotect(addr, 0, NULL);
17239fa3e853Sbellard     }
17249fa3e853Sbellard }
17259fa3e853Sbellard 
static inline void tlb_set_dirty(unsigned long addr, target_ulong vaddr)
{
    /* user-mode emulation keeps no TLB: nothing to update */
}
17299fa3e853Sbellard #endif /* defined(CONFIG_USER_ONLY) */
173033417e70Sbellard 
173133417e70Sbellard /* register physical memory. 'size' must be a multiple of the target
173233417e70Sbellard    page size. If (phys_offset & ~TARGET_PAGE_MASK) != 0, then it is an
173333417e70Sbellard    io memory page */
17342e12669aSbellard void cpu_register_physical_memory(target_phys_addr_t start_addr,
17352e12669aSbellard                                   unsigned long size,
17362e12669aSbellard                                   unsigned long phys_offset)
173733417e70Sbellard {
1738108c49b8Sbellard     target_phys_addr_t addr, end_addr;
173992e873b9Sbellard     PhysPageDesc *p;
174033417e70Sbellard 
17415fd386f6Sbellard     size = (size + TARGET_PAGE_SIZE - 1) & TARGET_PAGE_MASK;
174233417e70Sbellard     end_addr = start_addr + size;
17435fd386f6Sbellard     for(addr = start_addr; addr != end_addr; addr += TARGET_PAGE_SIZE) {
1744108c49b8Sbellard         p = phys_page_find_alloc(addr >> TARGET_PAGE_BITS, 1);
17459fa3e853Sbellard         p->phys_offset = phys_offset;
17469fa3e853Sbellard         if ((phys_offset & ~TARGET_PAGE_MASK) <= IO_MEM_ROM)
174733417e70Sbellard             phys_offset += TARGET_PAGE_SIZE;
174833417e70Sbellard     }
174933417e70Sbellard }
175033417e70Sbellard 
/* reads from unassigned physical memory yield 0 */
static uint32_t unassigned_mem_readb(void *opaque, target_phys_addr_t addr)
{
    return 0;
}
175533417e70Sbellard 
/* writes to unassigned physical memory are silently ignored */
static void unassigned_mem_writeb(void *opaque, target_phys_addr_t addr, uint32_t val)
{
}
175933417e70Sbellard 
/* byte/word/dword read handlers for unassigned memory (all return 0) */
static CPUReadMemoryFunc *unassigned_mem_read[3] = {
    unassigned_mem_readb,
    unassigned_mem_readb,
    unassigned_mem_readb,
};
176533417e70Sbellard 
/* byte/word/dword write handlers for unassigned memory (all no-ops) */
static CPUWriteMemoryFunc *unassigned_mem_write[3] = {
    unassigned_mem_writeb,
    unassigned_mem_writeb,
    unassigned_mem_writeb,
};
177133417e70Sbellard 
/* Byte write to a RAM page currently marked NOTDIRTY: invalidate any
   translated code on the page, perform the store, then set the page's
   dirty bits. */
static void notdirty_mem_writeb(void *opaque, target_phys_addr_t addr, uint32_t val)
{
    unsigned long ram_addr;
    int dirty_flags;
    /* 'addr' here is the host address; recover the RAM offset */
    ram_addr = addr - (unsigned long)phys_ram_base;
    dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
    if (!(dirty_flags & CODE_DIRTY_FLAG)) {
#if !defined(CONFIG_USER_ONLY)
        /* page may contain translated code: invalidate it, then re-read
           the flags which the invalidation may have changed */
        tb_invalidate_phys_page_fast(ram_addr, 1);
        dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
#endif
    }
    stb_p((uint8_t *)(long)addr, val);
    /* set every dirty bit except CODE_DIRTY_FLAG */
    dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
    phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] = dirty_flags;
    /* we remove the notdirty callback only if the code has been
       flushed */
    if (dirty_flags == 0xff)
        tlb_set_dirty(addr, cpu_single_env->mem_write_vaddr);
}
17921ccde1cbSbellard 
/* 16-bit variant of notdirty_mem_writeb: invalidate translated code,
   store, then mark the page dirty. */
static void notdirty_mem_writew(void *opaque, target_phys_addr_t addr, uint32_t val)
{
    unsigned long ram_addr;
    int dirty_flags;
    /* 'addr' here is the host address; recover the RAM offset */
    ram_addr = addr - (unsigned long)phys_ram_base;
    dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
    if (!(dirty_flags & CODE_DIRTY_FLAG)) {
#if !defined(CONFIG_USER_ONLY)
        /* invalidate 2 bytes worth of translated code, then re-read flags */
        tb_invalidate_phys_page_fast(ram_addr, 2);
        dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
#endif
    }
    stw_p((uint8_t *)(long)addr, val);
    /* set every dirty bit except CODE_DIRTY_FLAG */
    dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
    phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] = dirty_flags;
    /* we remove the notdirty callback only if the code has been
       flushed */
    if (dirty_flags == 0xff)
        tlb_set_dirty(addr, cpu_single_env->mem_write_vaddr);
}
18131ccde1cbSbellard 
/* 32-bit variant of notdirty_mem_writeb: invalidate translated code,
   store, then mark the page dirty. */
static void notdirty_mem_writel(void *opaque, target_phys_addr_t addr, uint32_t val)
{
    unsigned long ram_addr;
    int dirty_flags;
    /* 'addr' here is the host address; recover the RAM offset */
    ram_addr = addr - (unsigned long)phys_ram_base;
    dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
    if (!(dirty_flags & CODE_DIRTY_FLAG)) {
#if !defined(CONFIG_USER_ONLY)
        /* invalidate 4 bytes worth of translated code, then re-read flags */
        tb_invalidate_phys_page_fast(ram_addr, 4);
        dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
#endif
    }
    stl_p((uint8_t *)(long)addr, val);
    /* set every dirty bit except CODE_DIRTY_FLAG */
    dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
    phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] = dirty_flags;
    /* we remove the notdirty callback only if the code has been
       flushed */
    if (dirty_flags == 0xff)
        tlb_set_dirty(addr, cpu_single_env->mem_write_vaddr);
}
18341ccde1cbSbellard 
/* placeholder read table for IO zones that are only ever written
   (ROM/NOTDIRTY reads go through the fast RAM path instead) */
static CPUReadMemoryFunc *error_mem_read[3] = {
    NULL, /* never used */
    NULL, /* never used */
    NULL, /* never used */
};
18403a7d929eSbellard 
/* byte/word/dword write handlers for NOTDIRTY RAM pages */
static CPUWriteMemoryFunc *notdirty_mem_write[3] = {
    notdirty_mem_writeb,
    notdirty_mem_writew,
    notdirty_mem_writel,
};
18461ccde1cbSbellard 
/* Register the built-in IO zones (ROM, unassigned, notdirty) and
   allocate the RAM dirty bitmap (one byte per target page). */
static void io_mem_init(void)
{
    cpu_register_io_memory(IO_MEM_ROM >> IO_MEM_SHIFT, error_mem_read, unassigned_mem_write, NULL);
    cpu_register_io_memory(IO_MEM_UNASSIGNED >> IO_MEM_SHIFT, unassigned_mem_read, unassigned_mem_write, NULL);
    cpu_register_io_memory(IO_MEM_NOTDIRTY >> IO_MEM_SHIFT, error_mem_read, notdirty_mem_write, NULL);
    /* NOTE(review): 5 must stay in sync with the number of reserved
       IO_MEM_* slots declared elsewhere — confirm before changing */
    io_mem_nb = 5;

    /* alloc dirty bits array */
    phys_ram_dirty = qemu_vmalloc(phys_ram_size >> TARGET_PAGE_BITS);
    /* all pages start fully dirty */
    memset(phys_ram_dirty, 0xff, phys_ram_size >> TARGET_PAGE_BITS);
}
185833417e70Sbellard 
185933417e70Sbellard /* mem_read and mem_write are arrays of functions containing the
186033417e70Sbellard    function to access byte (index 0), word (index 1) and dword (index
186133417e70Sbellard    2). All functions must be supplied. If io_index is non zero, the
186233417e70Sbellard    corresponding io zone is modified. If it is zero, a new io zone is
186333417e70Sbellard    allocated. The return value can be used with
186433417e70Sbellard    cpu_register_physical_memory(). (-1) is returned if error. */
186533417e70Sbellard int cpu_register_io_memory(int io_index,
186633417e70Sbellard                            CPUReadMemoryFunc **mem_read,
1867a4193c8aSbellard                            CPUWriteMemoryFunc **mem_write,
1868a4193c8aSbellard                            void *opaque)
186933417e70Sbellard {
187033417e70Sbellard     int i;
187133417e70Sbellard 
187233417e70Sbellard     if (io_index <= 0) {
187333417e70Sbellard         if (io_index >= IO_MEM_NB_ENTRIES)
187433417e70Sbellard             return -1;
187533417e70Sbellard         io_index = io_mem_nb++;
187633417e70Sbellard     } else {
187733417e70Sbellard         if (io_index >= IO_MEM_NB_ENTRIES)
187833417e70Sbellard             return -1;
187933417e70Sbellard     }
188033417e70Sbellard 
188133417e70Sbellard     for(i = 0;i < 3; i++) {
188233417e70Sbellard         io_mem_read[io_index][i] = mem_read[i];
188333417e70Sbellard         io_mem_write[io_index][i] = mem_write[i];
188433417e70Sbellard     }
1885a4193c8aSbellard     io_mem_opaque[io_index] = opaque;
188633417e70Sbellard     return io_index << IO_MEM_SHIFT;
188733417e70Sbellard }
188861382a50Sbellard 
18898926b517Sbellard CPUWriteMemoryFunc **cpu_get_io_memory_write(int io_index)
18908926b517Sbellard {
18918926b517Sbellard     return io_mem_write[io_index >> IO_MEM_SHIFT];
18928926b517Sbellard }
18938926b517Sbellard 
18948926b517Sbellard CPUReadMemoryFunc **cpu_get_io_memory_read(int io_index)
18958926b517Sbellard {
18968926b517Sbellard     return io_mem_read[io_index >> IO_MEM_SHIFT];
18978926b517Sbellard }
18988926b517Sbellard 
189913eb76e0Sbellard /* physical memory access (slow version, mainly for debug) */
190013eb76e0Sbellard #if defined(CONFIG_USER_ONLY)
19012e12669aSbellard void cpu_physical_memory_rw(target_phys_addr_t addr, uint8_t *buf,
190213eb76e0Sbellard                             int len, int is_write)
190313eb76e0Sbellard {
190413eb76e0Sbellard     int l, flags;
190513eb76e0Sbellard     target_ulong page;
190613eb76e0Sbellard 
190713eb76e0Sbellard     while (len > 0) {
190813eb76e0Sbellard         page = addr & TARGET_PAGE_MASK;
190913eb76e0Sbellard         l = (page + TARGET_PAGE_SIZE) - addr;
191013eb76e0Sbellard         if (l > len)
191113eb76e0Sbellard             l = len;
191213eb76e0Sbellard         flags = page_get_flags(page);
191313eb76e0Sbellard         if (!(flags & PAGE_VALID))
191413eb76e0Sbellard             return;
191513eb76e0Sbellard         if (is_write) {
191613eb76e0Sbellard             if (!(flags & PAGE_WRITE))
191713eb76e0Sbellard                 return;
191813eb76e0Sbellard             memcpy((uint8_t *)addr, buf, len);
191913eb76e0Sbellard         } else {
192013eb76e0Sbellard             if (!(flags & PAGE_READ))
192113eb76e0Sbellard                 return;
192213eb76e0Sbellard             memcpy(buf, (uint8_t *)addr, len);
192313eb76e0Sbellard         }
192413eb76e0Sbellard         len -= l;
192513eb76e0Sbellard         buf += l;
192613eb76e0Sbellard         addr += l;
192713eb76e0Sbellard     }
192813eb76e0Sbellard }
19298df1cd07Sbellard 
193013eb76e0Sbellard #else
/* Copy 'len' bytes between 'buf' and guest physical memory at 'addr'
   (is_write != 0: buf -> memory, else memory -> buf).  The transfer
   is split at target page boundaries.  I/O regions are accessed
   through the registered callbacks, using the widest access (4, 2 or
   1 bytes) permitted by the remaining length and the address
   alignment; RAM is accessed directly through phys_ram_base.  RAM
   writes invalidate any translated code in the written range and set
   the page dirty bits (except CODE_DIRTY_FLAG). */
void cpu_physical_memory_rw(target_phys_addr_t addr, uint8_t *buf,
                            int len, int is_write)
{
    int l, io_index;
    uint8_t *ptr;
    uint32_t val;
    target_phys_addr_t page;
    unsigned long pd;
    PhysPageDesc *p;

    while (len > 0) {
        page = addr & TARGET_PAGE_MASK;
        /* l = number of bytes remaining in the current page */
        l = (page + TARGET_PAGE_SIZE) - addr;
        if (l > len)
            l = len;
        p = phys_page_find(page >> TARGET_PAGE_BITS);
        if (!p) {
            /* unmapped page: treat as unassigned I/O */
            pd = IO_MEM_UNASSIGNED;
        } else {
            pd = p->phys_offset;
        }

        if (is_write) {
            if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
                /* I/O (or ROM) case: dispatch through the handler table */
                io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
                if (l >= 4 && ((addr & 3) == 0)) {
                    /* 32 bit write access */
                    val = ldl_p(buf);
                    io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
                    l = 4;
                } else if (l >= 2 && ((addr & 1) == 0)) {
                    /* 16 bit write access */
                    val = lduw_p(buf);
                    io_mem_write[io_index][1](io_mem_opaque[io_index], addr, val);
                    l = 2;
                } else {
                    /* 8 bit write access */
                    val = ldub_p(buf);
                    io_mem_write[io_index][0](io_mem_opaque[io_index], addr, val);
                    l = 1;
                }
            } else {
                unsigned long addr1;
                addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
                /* RAM case */
                ptr = phys_ram_base + addr1;
                memcpy(ptr, buf, l);
                if (!cpu_physical_memory_is_dirty(addr1)) {
                    /* invalidate code */
                    tb_invalidate_phys_page_range(addr1, addr1 + l, 0);
                    /* set dirty bit */
                    phys_ram_dirty[addr1 >> TARGET_PAGE_BITS] |=
                        (0xff & ~CODE_DIRTY_FLAG);
                }
            }
        } else {
            if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM) {
                /* I/O case */
                io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
                if (l >= 4 && ((addr & 3) == 0)) {
                    /* 32 bit read access */
                    val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr);
                    stl_p(buf, val);
                    l = 4;
                } else if (l >= 2 && ((addr & 1) == 0)) {
                    /* 16 bit read access */
                    val = io_mem_read[io_index][1](io_mem_opaque[io_index], addr);
                    stw_p(buf, val);
                    l = 2;
                } else {
                    /* 8 bit read access */
                    val = io_mem_read[io_index][0](io_mem_opaque[io_index], addr);
                    stb_p(buf, val);
                    l = 1;
                }
            } else {
                /* RAM case: ROM and RAM reads both come straight from
                   host memory */
                ptr = phys_ram_base + (pd & TARGET_PAGE_MASK) +
                    (addr & ~TARGET_PAGE_MASK);
                memcpy(buf, ptr, l);
            }
        }
        len -= l;
        buf += l;
        addr += l;
    }
}
20188df1cd07Sbellard 
20198df1cd07Sbellard /* warning: addr must be aligned */
20208df1cd07Sbellard uint32_t ldl_phys(target_phys_addr_t addr)
20218df1cd07Sbellard {
20228df1cd07Sbellard     int io_index;
20238df1cd07Sbellard     uint8_t *ptr;
20248df1cd07Sbellard     uint32_t val;
20258df1cd07Sbellard     unsigned long pd;
20268df1cd07Sbellard     PhysPageDesc *p;
20278df1cd07Sbellard 
20288df1cd07Sbellard     p = phys_page_find(addr >> TARGET_PAGE_BITS);
20298df1cd07Sbellard     if (!p) {
20308df1cd07Sbellard         pd = IO_MEM_UNASSIGNED;
20318df1cd07Sbellard     } else {
20328df1cd07Sbellard         pd = p->phys_offset;
20338df1cd07Sbellard     }
20348df1cd07Sbellard 
20353a7d929eSbellard     if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM) {
20368df1cd07Sbellard         /* I/O case */
20378df1cd07Sbellard         io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
20388df1cd07Sbellard         val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr);
20398df1cd07Sbellard     } else {
20408df1cd07Sbellard         /* RAM case */
20418df1cd07Sbellard         ptr = phys_ram_base + (pd & TARGET_PAGE_MASK) +
20428df1cd07Sbellard             (addr & ~TARGET_PAGE_MASK);
20438df1cd07Sbellard         val = ldl_p(ptr);
20448df1cd07Sbellard     }
20458df1cd07Sbellard     return val;
20468df1cd07Sbellard }
20478df1cd07Sbellard 
2048aab33094Sbellard /* XXX: optimize */
2049aab33094Sbellard uint32_t ldub_phys(target_phys_addr_t addr)
2050aab33094Sbellard {
2051aab33094Sbellard     uint8_t val;
2052aab33094Sbellard     cpu_physical_memory_read(addr, &val, 1);
2053aab33094Sbellard     return val;
2054aab33094Sbellard }
2055aab33094Sbellard 
2056aab33094Sbellard /* XXX: optimize */
2057aab33094Sbellard uint32_t lduw_phys(target_phys_addr_t addr)
2058aab33094Sbellard {
2059aab33094Sbellard     uint16_t val;
2060aab33094Sbellard     cpu_physical_memory_read(addr, (uint8_t *)&val, 2);
2061aab33094Sbellard     return tswap16(val);
2062aab33094Sbellard }
2063aab33094Sbellard 
2064aab33094Sbellard /* XXX: optimize */
2065aab33094Sbellard uint64_t ldq_phys(target_phys_addr_t addr)
2066aab33094Sbellard {
2067aab33094Sbellard     uint64_t val;
2068aab33094Sbellard     cpu_physical_memory_read(addr, (uint8_t *)&val, 8);
2069aab33094Sbellard     return tswap64(val);
2070aab33094Sbellard }
2071aab33094Sbellard 
20728df1cd07Sbellard /* warning: addr must be aligned. The ram page is not masked as dirty
20738df1cd07Sbellard    and the code inside is not invalidated. It is useful if the dirty
20748df1cd07Sbellard    bits are used to track modified PTEs */
20758df1cd07Sbellard void stl_phys_notdirty(target_phys_addr_t addr, uint32_t val)
20768df1cd07Sbellard {
20778df1cd07Sbellard     int io_index;
20788df1cd07Sbellard     uint8_t *ptr;
20798df1cd07Sbellard     unsigned long pd;
20808df1cd07Sbellard     PhysPageDesc *p;
20818df1cd07Sbellard 
20828df1cd07Sbellard     p = phys_page_find(addr >> TARGET_PAGE_BITS);
20838df1cd07Sbellard     if (!p) {
20848df1cd07Sbellard         pd = IO_MEM_UNASSIGNED;
20858df1cd07Sbellard     } else {
20868df1cd07Sbellard         pd = p->phys_offset;
20878df1cd07Sbellard     }
20888df1cd07Sbellard 
20893a7d929eSbellard     if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
20908df1cd07Sbellard         io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
20918df1cd07Sbellard         io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
20928df1cd07Sbellard     } else {
20938df1cd07Sbellard         ptr = phys_ram_base + (pd & TARGET_PAGE_MASK) +
20948df1cd07Sbellard             (addr & ~TARGET_PAGE_MASK);
20958df1cd07Sbellard         stl_p(ptr, val);
20968df1cd07Sbellard     }
20978df1cd07Sbellard }
20988df1cd07Sbellard 
/* Store a 32 bit value to guest physical memory.  A RAM store
   invalidates any translated code at the written address and sets the
   page dirty bits; a store to an I/O (or ROM) region goes through the
   registered 32 bit write handler.
   warning: addr must be aligned */
void stl_phys(target_phys_addr_t addr, uint32_t val)
{
    int io_index;
    uint8_t *ptr;
    unsigned long pd;
    PhysPageDesc *p;

    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    if (!p) {
        /* unmapped page: treat as unassigned I/O */
        pd = IO_MEM_UNASSIGNED;
    } else {
        pd = p->phys_offset;
    }

    if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
        /* I/O (or ROM) case */
        io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
        io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
    } else {
        unsigned long addr1;
        addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
        /* RAM case */
        ptr = phys_ram_base + addr1;
        stl_p(ptr, val);
        if (!cpu_physical_memory_is_dirty(addr1)) {
            /* invalidate code */
            tb_invalidate_phys_page_range(addr1, addr1 + 4, 0);
            /* set dirty bit */
            phys_ram_dirty[addr1 >> TARGET_PAGE_BITS] |=
                (0xff & ~CODE_DIRTY_FLAG);
        }
    }
}
21328df1cd07Sbellard 
2133aab33094Sbellard /* XXX: optimize */
2134aab33094Sbellard void stb_phys(target_phys_addr_t addr, uint32_t val)
2135aab33094Sbellard {
2136aab33094Sbellard     uint8_t v = val;
2137aab33094Sbellard     cpu_physical_memory_write(addr, &v, 1);
2138aab33094Sbellard }
2139aab33094Sbellard 
2140aab33094Sbellard /* XXX: optimize */
2141aab33094Sbellard void stw_phys(target_phys_addr_t addr, uint32_t val)
2142aab33094Sbellard {
2143aab33094Sbellard     uint16_t v = tswap16(val);
2144aab33094Sbellard     cpu_physical_memory_write(addr, (const uint8_t *)&v, 2);
2145aab33094Sbellard }
2146aab33094Sbellard 
2147aab33094Sbellard /* XXX: optimize */
2148aab33094Sbellard void stq_phys(target_phys_addr_t addr, uint64_t val)
2149aab33094Sbellard {
2150aab33094Sbellard     val = tswap64(val);
2151aab33094Sbellard     cpu_physical_memory_write(addr, (const uint8_t *)&val, 8);
2152aab33094Sbellard }
2153aab33094Sbellard 
215413eb76e0Sbellard #endif
215513eb76e0Sbellard 
215613eb76e0Sbellard /* virtual memory access for debug */
2157b448f2f3Sbellard int cpu_memory_rw_debug(CPUState *env, target_ulong addr,
2158b448f2f3Sbellard                         uint8_t *buf, int len, int is_write)
215913eb76e0Sbellard {
216013eb76e0Sbellard     int l;
216113eb76e0Sbellard     target_ulong page, phys_addr;
216213eb76e0Sbellard 
216313eb76e0Sbellard     while (len > 0) {
216413eb76e0Sbellard         page = addr & TARGET_PAGE_MASK;
216513eb76e0Sbellard         phys_addr = cpu_get_phys_page_debug(env, page);
216613eb76e0Sbellard         /* if no physical page mapped, return an error */
216713eb76e0Sbellard         if (phys_addr == -1)
216813eb76e0Sbellard             return -1;
216913eb76e0Sbellard         l = (page + TARGET_PAGE_SIZE) - addr;
217013eb76e0Sbellard         if (l > len)
217113eb76e0Sbellard             l = len;
2172b448f2f3Sbellard         cpu_physical_memory_rw(phys_addr + (addr & ~TARGET_PAGE_MASK),
2173b448f2f3Sbellard                                buf, l, is_write);
217413eb76e0Sbellard         len -= l;
217513eb76e0Sbellard         buf += l;
217613eb76e0Sbellard         addr += l;
217713eb76e0Sbellard     }
217813eb76e0Sbellard     return 0;
217913eb76e0Sbellard }
218013eb76e0Sbellard 
2181e3db7226Sbellard void dump_exec_info(FILE *f,
2182e3db7226Sbellard                     int (*cpu_fprintf)(FILE *f, const char *fmt, ...))
2183e3db7226Sbellard {
2184e3db7226Sbellard     int i, target_code_size, max_target_code_size;
2185e3db7226Sbellard     int direct_jmp_count, direct_jmp2_count, cross_page;
2186e3db7226Sbellard     TranslationBlock *tb;
2187e3db7226Sbellard 
2188e3db7226Sbellard     target_code_size = 0;
2189e3db7226Sbellard     max_target_code_size = 0;
2190e3db7226Sbellard     cross_page = 0;
2191e3db7226Sbellard     direct_jmp_count = 0;
2192e3db7226Sbellard     direct_jmp2_count = 0;
2193e3db7226Sbellard     for(i = 0; i < nb_tbs; i++) {
2194e3db7226Sbellard         tb = &tbs[i];
2195e3db7226Sbellard         target_code_size += tb->size;
2196e3db7226Sbellard         if (tb->size > max_target_code_size)
2197e3db7226Sbellard             max_target_code_size = tb->size;
2198e3db7226Sbellard         if (tb->page_addr[1] != -1)
2199e3db7226Sbellard             cross_page++;
2200e3db7226Sbellard         if (tb->tb_next_offset[0] != 0xffff) {
2201e3db7226Sbellard             direct_jmp_count++;
2202e3db7226Sbellard             if (tb->tb_next_offset[1] != 0xffff) {
2203e3db7226Sbellard                 direct_jmp2_count++;
2204e3db7226Sbellard             }
2205e3db7226Sbellard         }
2206e3db7226Sbellard     }
2207e3db7226Sbellard     /* XXX: avoid using doubles ? */
2208e3db7226Sbellard     cpu_fprintf(f, "TB count            %d\n", nb_tbs);
2209e3db7226Sbellard     cpu_fprintf(f, "TB avg target size  %d max=%d bytes\n",
2210e3db7226Sbellard                 nb_tbs ? target_code_size / nb_tbs : 0,
2211e3db7226Sbellard                 max_target_code_size);
2212e3db7226Sbellard     cpu_fprintf(f, "TB avg host size    %d bytes (expansion ratio: %0.1f)\n",
2213e3db7226Sbellard                 nb_tbs ? (code_gen_ptr - code_gen_buffer) / nb_tbs : 0,
2214e3db7226Sbellard                 target_code_size ? (double) (code_gen_ptr - code_gen_buffer) / target_code_size : 0);
2215e3db7226Sbellard     cpu_fprintf(f, "cross page TB count %d (%d%%)\n",
2216e3db7226Sbellard             cross_page,
2217e3db7226Sbellard             nb_tbs ? (cross_page * 100) / nb_tbs : 0);
2218e3db7226Sbellard     cpu_fprintf(f, "direct jump count   %d (%d%%) (2 jumps=%d %d%%)\n",
2219e3db7226Sbellard                 direct_jmp_count,
2220e3db7226Sbellard                 nb_tbs ? (direct_jmp_count * 100) / nb_tbs : 0,
2221e3db7226Sbellard                 direct_jmp2_count,
2222e3db7226Sbellard                 nb_tbs ? (direct_jmp2_count * 100) / nb_tbs : 0);
2223e3db7226Sbellard     cpu_fprintf(f, "TB flush count      %d\n", tb_flush_count);
2224e3db7226Sbellard     cpu_fprintf(f, "TB invalidate count %d\n", tb_phys_invalidate_count);
2225e3db7226Sbellard     cpu_fprintf(f, "TLB flush count     %d\n", tlb_flush_count);
2226e3db7226Sbellard }
2227e3db7226Sbellard 
222861382a50Sbellard #if !defined(CONFIG_USER_ONLY)
222961382a50Sbellard 
223061382a50Sbellard #define MMUSUFFIX _cmmu
223161382a50Sbellard #define GETPC() NULL
223261382a50Sbellard #define env cpu_single_env
2233b769d8feSbellard #define SOFTMMU_CODE_ACCESS
223461382a50Sbellard 
223561382a50Sbellard #define SHIFT 0
223661382a50Sbellard #include "softmmu_template.h"
223761382a50Sbellard 
223861382a50Sbellard #define SHIFT 1
223961382a50Sbellard #include "softmmu_template.h"
224061382a50Sbellard 
224161382a50Sbellard #define SHIFT 2
224261382a50Sbellard #include "softmmu_template.h"
224361382a50Sbellard 
224461382a50Sbellard #define SHIFT 3
224561382a50Sbellard #include "softmmu_template.h"
224661382a50Sbellard 
224761382a50Sbellard #undef env
224861382a50Sbellard 
224961382a50Sbellard #endif
2250