xref: /qemu/system/physmem.c (revision 11fcfab4b0d7349aa3b00e3205f3b0805396b680)
154936004Sbellard /*
2fd6ce8f6Sbellard  *  virtual page mapping and translated block handling
354936004Sbellard  *
454936004Sbellard  *  Copyright (c) 2003 Fabrice Bellard
554936004Sbellard  *
654936004Sbellard  * This library is free software; you can redistribute it and/or
754936004Sbellard  * modify it under the terms of the GNU Lesser General Public
854936004Sbellard  * License as published by the Free Software Foundation; either
954936004Sbellard  * version 2 of the License, or (at your option) any later version.
1054936004Sbellard  *
1154936004Sbellard  * This library is distributed in the hope that it will be useful,
1254936004Sbellard  * but WITHOUT ANY WARRANTY; without even the implied warranty of
1354936004Sbellard  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
1454936004Sbellard  * Lesser General Public License for more details.
1554936004Sbellard  *
1654936004Sbellard  * You should have received a copy of the GNU Lesser General Public
1754936004Sbellard  * License along with this library; if not, write to the Free Software
1854936004Sbellard  * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
1954936004Sbellard  */
2067b915a5Sbellard #include "config.h"
21d5a8f07cSbellard #ifdef _WIN32
22d5a8f07cSbellard #include <windows.h>
23d5a8f07cSbellard #else
24a98d49b1Sbellard #include <sys/types.h>
25d5a8f07cSbellard #include <sys/mman.h>
26d5a8f07cSbellard #endif
2754936004Sbellard #include <stdlib.h>
2854936004Sbellard #include <stdio.h>
2954936004Sbellard #include <stdarg.h>
3054936004Sbellard #include <string.h>
3154936004Sbellard #include <errno.h>
3254936004Sbellard #include <unistd.h>
3354936004Sbellard #include <inttypes.h>
3454936004Sbellard 
356180a181Sbellard #include "cpu.h"
366180a181Sbellard #include "exec-all.h"
3753a5960aSpbrook #if defined(CONFIG_USER_ONLY)
3853a5960aSpbrook #include <qemu.h>
3953a5960aSpbrook #endif
4054936004Sbellard 
41fd6ce8f6Sbellard //#define DEBUG_TB_INVALIDATE
4266e85a21Sbellard //#define DEBUG_FLUSH
439fa3e853Sbellard //#define DEBUG_TLB
4467d3b957Spbrook //#define DEBUG_UNASSIGNED
45fd6ce8f6Sbellard 
46fd6ce8f6Sbellard /* make various TB consistency checks */
47fd6ce8f6Sbellard //#define DEBUG_TB_CHECK
4898857888Sbellard //#define DEBUG_TLB_CHECK
49fd6ce8f6Sbellard 
501196be37Sths //#define DEBUG_IOPORT
51db7b5426Sblueswir1 //#define DEBUG_SUBPAGE
521196be37Sths 
5399773bd4Spbrook #if !defined(CONFIG_USER_ONLY)
5499773bd4Spbrook /* TB consistency checks only implemented for usermode emulation.  */
5599773bd4Spbrook #undef DEBUG_TB_CHECK
5699773bd4Spbrook #endif
5799773bd4Spbrook 
58fd6ce8f6Sbellard /* threshold to flush the translated code buffer */
59fd6ce8f6Sbellard #define CODE_GEN_BUFFER_MAX_SIZE (CODE_GEN_BUFFER_SIZE - CODE_GEN_MAX_SIZE)
60fd6ce8f6Sbellard 
619fa3e853Sbellard #define SMC_BITMAP_USE_THRESHOLD 10
629fa3e853Sbellard 
639fa3e853Sbellard #define MMAP_AREA_START        0x00000000
649fa3e853Sbellard #define MMAP_AREA_END          0xa8000000
65fd6ce8f6Sbellard 
66108c49b8Sbellard #if defined(TARGET_SPARC64)
67108c49b8Sbellard #define TARGET_PHYS_ADDR_SPACE_BITS 41
685dcb6b91Sblueswir1 #elif defined(TARGET_SPARC)
695dcb6b91Sblueswir1 #define TARGET_PHYS_ADDR_SPACE_BITS 36
70bedb69eaSj_mayer #elif defined(TARGET_ALPHA)
71bedb69eaSj_mayer #define TARGET_PHYS_ADDR_SPACE_BITS 42
72bedb69eaSj_mayer #define TARGET_VIRT_ADDR_SPACE_BITS 42
73108c49b8Sbellard #elif defined(TARGET_PPC64)
74108c49b8Sbellard #define TARGET_PHYS_ADDR_SPACE_BITS 42
75108c49b8Sbellard #else
76108c49b8Sbellard /* Note: for compatibility with kqemu, we use 32 bits for x86_64 */
77108c49b8Sbellard #define TARGET_PHYS_ADDR_SPACE_BITS 32
78108c49b8Sbellard #endif
79108c49b8Sbellard 
/* pool of pre-allocated TBs and the physical-pc hash table used to find them */
TranslationBlock tbs[CODE_GEN_MAX_BLOCKS];
TranslationBlock *tb_phys_hash[CODE_GEN_PHYS_HASH_SIZE];
int nb_tbs;   /* number of entries of 'tbs' currently in use */
/* any access to the tbs or the page table must use this lock */
spinlock_t tb_lock = SPIN_LOCK_UNLOCKED;

/* statically allocated buffer that receives all generated host code */
uint8_t code_gen_buffer[CODE_GEN_BUFFER_SIZE] __attribute__((aligned (32)));
uint8_t *code_gen_ptr;   /* next free byte inside code_gen_buffer */

/* guest RAM: host mapping plus allocation cursor and dirty map */
int phys_ram_size;
int phys_ram_fd;
uint8_t *phys_ram_base;
uint8_t *phys_ram_dirty;
static ram_addr_t phys_ram_alloc_offset = 0;

CPUState *first_cpu;   /* head of the linked list of all CPUs (see cpu_exec_init) */
/* current CPU in the current thread. It is only valid inside
   cpu_exec() */
CPUState *cpu_single_env;
996a00d601Sbellard 
/* Per (virtual) page bookkeeping for the translated-code machinery. */
typedef struct PageDesc {
    /* list of TBs intersecting this ram page */
    TranslationBlock *first_tb;
    /* in order to optimize self modifying code, we count the number
       of lookups we do to a given page to use a bitmap */
    unsigned int code_write_count;
    /* 1 bit per byte of the page, set where translated code resides
       (built lazily by build_page_bitmap) */
    uint8_t *code_bitmap;
#if defined(CONFIG_USER_ONLY)
    /* PAGE_* protection flags (usermode emulation only) */
    unsigned long flags;
#endif
} PageDesc;
11154936004Sbellard 
/* Per physical page descriptor, reached through l1_phys_map. */
typedef struct PhysPageDesc {
    /* offset in host memory of the page + io_index in the low 12 bits */
    uint32_t phys_offset;
} PhysPageDesc;
11692e873b9Sbellard 
11754936004Sbellard #define L2_BITS 10
118bedb69eaSj_mayer #if defined(CONFIG_USER_ONLY) && defined(TARGET_VIRT_ADDR_SPACE_BITS)
119bedb69eaSj_mayer /* XXX: this is a temporary hack for alpha target.
120bedb69eaSj_mayer  *      In the future, this is to be replaced by a multi-level table
121bedb69eaSj_mayer  *      to actually be able to handle the complete 64 bits address space.
122bedb69eaSj_mayer  */
123bedb69eaSj_mayer #define L1_BITS (TARGET_VIRT_ADDR_SPACE_BITS - L2_BITS - TARGET_PAGE_BITS)
124bedb69eaSj_mayer #else
12554936004Sbellard #define L1_BITS (32 - L2_BITS - TARGET_PAGE_BITS)
126bedb69eaSj_mayer #endif
12754936004Sbellard 
12854936004Sbellard #define L1_SIZE (1 << L1_BITS)
12954936004Sbellard #define L2_SIZE (1 << L2_BITS)
13054936004Sbellard 
static void io_mem_init(void);

/* host page geometry, computed once in page_init() */
unsigned long qemu_real_host_page_size;
unsigned long qemu_host_page_bits;
unsigned long qemu_host_page_size;
unsigned long qemu_host_page_mask;

/* XXX: for system emulation, it could just be an array */
/* two-level map: virtual page index -> PageDesc */
static PageDesc *l1_map[L1_SIZE];
/* physical page index -> PhysPageDesc; allocated in page_init() */
PhysPageDesc **l1_phys_map;

/* io memory support */
/* 4 handlers per region -- presumably one per access size; confirm
   against the io registration code (not visible here) */
CPUWriteMemoryFunc *io_mem_write[IO_MEM_NB_ENTRIES][4];
CPUReadMemoryFunc *io_mem_read[IO_MEM_NB_ENTRIES][4];
void *io_mem_opaque[IO_MEM_NB_ENTRIES];
static int io_mem_nb;
#if defined(CONFIG_SOFTMMU)
/* NOTE(review): presumably the io index reserved for watchpoints --
   set elsewhere; verify against io_mem_init */
static int io_mem_watch;
#endif

/* log support */
/* NOTE(review): points at a string literal; should arguably be const char * */
char *logfilename = "/tmp/qemu.log";
FILE *logfile;
int loglevel;
static int log_append = 0;

/* statistics */
static int tlb_flush_count;
static int tb_flush_count;
static int tb_phys_invalidate_count;
161e3db7226Sbellard 
#define SUBPAGE_IDX(addr) ((addr) & ~TARGET_PAGE_MASK)
/* State for a guest page split into several I/O regions: one set of
   read/write handler tables and an opaque pointer per byte offset
   within the page (indexed via SUBPAGE_IDX). */
typedef struct subpage_t {
    target_phys_addr_t base;   /* guest physical base address of the page */
    CPUReadMemoryFunc **mem_read[TARGET_PAGE_SIZE];
    CPUWriteMemoryFunc **mem_write[TARGET_PAGE_SIZE];
    void *opaque[TARGET_PAGE_SIZE];
} subpage_t;
169db7b5426Sblueswir1 
/* One-time host-page setup: determine the host page size, make the static
   translated-code buffer executable, derive the qemu_host_page_* globals,
   and allocate+zero the top level of the physical page table. */
static void page_init(void)
{
    /* NOTE: we can always suppose that qemu_host_page_size >=
       TARGET_PAGE_SIZE */
#ifdef _WIN32
    {
        SYSTEM_INFO system_info;
        DWORD old_protect;

        GetSystemInfo(&system_info);
        qemu_real_host_page_size = system_info.dwPageSize;

        /* code_gen_buffer is a plain static array: it must be made
           executable before generated code can run from it */
        VirtualProtect(code_gen_buffer, sizeof(code_gen_buffer),
                       PAGE_EXECUTE_READWRITE, &old_protect);
    }
#else
    qemu_real_host_page_size = getpagesize();
    {
        unsigned long start, end;

        /* round the buffer limits outward to host page boundaries,
           since mprotect operates on whole pages */
        start = (unsigned long)code_gen_buffer;
        start &= ~(qemu_real_host_page_size - 1);

        end = (unsigned long)code_gen_buffer + sizeof(code_gen_buffer);
        end += qemu_real_host_page_size - 1;
        end &= ~(qemu_real_host_page_size - 1);

        /* NOTE(review): the mprotect return value is ignored; a failure
           would surface later as a fault when executing generated code */
        mprotect((void *)start, end - start,
                 PROT_READ | PROT_WRITE | PROT_EXEC);
    }
#endif

    /* if not already set, default the logical host page size to the real
       one, and never let it drop below the target page size */
    if (qemu_host_page_size == 0)
        qemu_host_page_size = qemu_real_host_page_size;
    if (qemu_host_page_size < TARGET_PAGE_SIZE)
        qemu_host_page_size = TARGET_PAGE_SIZE;
    qemu_host_page_bits = 0;
    while ((1 << qemu_host_page_bits) < qemu_host_page_size)
        qemu_host_page_bits++;
    qemu_host_page_mask = ~(qemu_host_page_size - 1);
    /* top level of the physical page table, zeroed (all entries absent) */
    l1_phys_map = qemu_vmalloc(L1_SIZE * sizeof(void *));
    memset(l1_phys_map, 0, L1_SIZE * sizeof(void *));
}
21354936004Sbellard 
214fd6ce8f6Sbellard static inline PageDesc *page_find_alloc(unsigned int index)
21554936004Sbellard {
21654936004Sbellard     PageDesc **lp, *p;
21754936004Sbellard 
21854936004Sbellard     lp = &l1_map[index >> L2_BITS];
21954936004Sbellard     p = *lp;
22054936004Sbellard     if (!p) {
22154936004Sbellard         /* allocate if not found */
22259817ccbSbellard         p = qemu_malloc(sizeof(PageDesc) * L2_SIZE);
223fd6ce8f6Sbellard         memset(p, 0, sizeof(PageDesc) * L2_SIZE);
22454936004Sbellard         *lp = p;
22554936004Sbellard     }
22654936004Sbellard     return p + (index & (L2_SIZE - 1));
22754936004Sbellard }
22854936004Sbellard 
229fd6ce8f6Sbellard static inline PageDesc *page_find(unsigned int index)
23054936004Sbellard {
23154936004Sbellard     PageDesc *p;
23254936004Sbellard 
23354936004Sbellard     p = l1_map[index >> L2_BITS];
23454936004Sbellard     if (!p)
23554936004Sbellard         return 0;
236fd6ce8f6Sbellard     return p + (index & (L2_SIZE - 1));
23754936004Sbellard }
23854936004Sbellard 
/* Return the PhysPageDesc for physical page 'index', building the missing
   intermediate table levels on demand when 'alloc' is non-zero.  Returns
   NULL when a level is absent and 'alloc' is 0.  For physical address
   spaces wider than 32 bits an extra top table level is walked first. */
static PhysPageDesc *phys_page_find_alloc(target_phys_addr_t index, int alloc)
{
    void **lp, **p;
    PhysPageDesc *pd;

    p = (void **)l1_phys_map;
#if TARGET_PHYS_ADDR_SPACE_BITS > 32

#if TARGET_PHYS_ADDR_SPACE_BITS > (32 + L1_BITS)
#error unsupported TARGET_PHYS_ADDR_SPACE_BITS
#endif
    /* extra top level for > 32-bit physical address spaces */
    lp = p + ((index >> (L1_BITS + L2_BITS)) & (L1_SIZE - 1));
    p = *lp;
    if (!p) {
        /* allocate if not found */
        if (!alloc)
            return NULL;
        p = qemu_vmalloc(sizeof(void *) * L1_SIZE);
        memset(p, 0, sizeof(void *) * L1_SIZE);
        *lp = p;
    }
#endif
    lp = p + ((index >> L2_BITS) & (L1_SIZE - 1));
    pd = *lp;
    if (!pd) {
        int i;
        /* allocate if not found */
        if (!alloc)
            return NULL;
        pd = qemu_vmalloc(sizeof(PhysPageDesc) * L2_SIZE);
        *lp = pd;
        /* every new page starts out unassigned (accesses go to io_mem) */
        for (i = 0; i < L2_SIZE; i++)
          pd[i].phys_offset = IO_MEM_UNASSIGNED;
    }
    return ((PhysPageDesc *)pd) + (index & (L2_SIZE - 1));
}
27592e873b9Sbellard 
/* Non-allocating lookup of a physical page descriptor; returns NULL when
   no descriptor has been created for 'index' yet. */
static inline PhysPageDesc *phys_page_find(target_phys_addr_t index)
{
    return phys_page_find_alloc(index, 0);
}
28092e873b9Sbellard 
2819fa3e853Sbellard #if !defined(CONFIG_USER_ONLY)
2826a00d601Sbellard static void tlb_protect_code(ram_addr_t ram_addr);
2833a7d929eSbellard static void tlb_unprotect_code_phys(CPUState *env, ram_addr_t ram_addr,
2843a7d929eSbellard                                     target_ulong vaddr);
2859fa3e853Sbellard #endif
286fd6ce8f6Sbellard 
2876a00d601Sbellard void cpu_exec_init(CPUState *env)
288fd6ce8f6Sbellard {
2896a00d601Sbellard     CPUState **penv;
2906a00d601Sbellard     int cpu_index;
2916a00d601Sbellard 
292fd6ce8f6Sbellard     if (!code_gen_ptr) {
293fd6ce8f6Sbellard         code_gen_ptr = code_gen_buffer;
294b346ff46Sbellard         page_init();
29533417e70Sbellard         io_mem_init();
296fd6ce8f6Sbellard     }
2976a00d601Sbellard     env->next_cpu = NULL;
2986a00d601Sbellard     penv = &first_cpu;
2996a00d601Sbellard     cpu_index = 0;
3006a00d601Sbellard     while (*penv != NULL) {
3016a00d601Sbellard         penv = (CPUState **)&(*penv)->next_cpu;
3026a00d601Sbellard         cpu_index++;
3036a00d601Sbellard     }
3046a00d601Sbellard     env->cpu_index = cpu_index;
3056658ffb8Spbrook     env->nb_watchpoints = 0;
3066a00d601Sbellard     *penv = env;
307fd6ce8f6Sbellard }
308fd6ce8f6Sbellard 
3099fa3e853Sbellard static inline void invalidate_page_bitmap(PageDesc *p)
3109fa3e853Sbellard {
3119fa3e853Sbellard     if (p->code_bitmap) {
31259817ccbSbellard         qemu_free(p->code_bitmap);
3139fa3e853Sbellard         p->code_bitmap = NULL;
3149fa3e853Sbellard     }
3159fa3e853Sbellard     p->code_write_count = 0;
3169fa3e853Sbellard }
3179fa3e853Sbellard 
318fd6ce8f6Sbellard /* set to NULL all the 'first_tb' fields in all PageDescs */
319fd6ce8f6Sbellard static void page_flush_tb(void)
320fd6ce8f6Sbellard {
321fd6ce8f6Sbellard     int i, j;
322fd6ce8f6Sbellard     PageDesc *p;
323fd6ce8f6Sbellard 
324fd6ce8f6Sbellard     for(i = 0; i < L1_SIZE; i++) {
325fd6ce8f6Sbellard         p = l1_map[i];
326fd6ce8f6Sbellard         if (p) {
3279fa3e853Sbellard             for(j = 0; j < L2_SIZE; j++) {
3289fa3e853Sbellard                 p->first_tb = NULL;
3299fa3e853Sbellard                 invalidate_page_bitmap(p);
3309fa3e853Sbellard                 p++;
3319fa3e853Sbellard             }
332fd6ce8f6Sbellard         }
333fd6ce8f6Sbellard     }
334fd6ce8f6Sbellard }
335fd6ce8f6Sbellard 
/* flush all the translation blocks */
/* XXX: tb_flush is currently not thread safe */
/* Note: the 'env1' argument is unused; the flush affects every CPU. */
void tb_flush(CPUState *env1)
{
    CPUState *env;
#if defined(DEBUG_FLUSH)
    /* NOTE(review): the pointer difference is a ptrdiff_t printed with %d;
       harmless on common hosts but technically mismatched */
    printf("qemu: flush code_size=%d nb_tbs=%d avg_tb_size=%d\n",
           code_gen_ptr - code_gen_buffer,
           nb_tbs,
           nb_tbs > 0 ? (code_gen_ptr - code_gen_buffer) / nb_tbs : 0);
#endif
    nb_tbs = 0;

    /* empty every CPU's virtual-pc -> TB lookup cache */
    for(env = first_cpu; env != NULL; env = env->next_cpu) {
        memset (env->tb_jmp_cache, 0, TB_JMP_CACHE_SIZE * sizeof (void *));
    }

    /* empty the physical hash table and the per-page TB lists */
    memset (tb_phys_hash, 0, CODE_GEN_PHYS_HASH_SIZE * sizeof (void *));
    page_flush_tb();

    /* reclaim the entire code buffer */
    code_gen_ptr = code_gen_buffer;
    /* XXX: flush processor icache at this point if cache flush is
       expensive */
    tb_flush_count++;
}
361fd6ce8f6Sbellard 
#ifdef DEBUG_TB_CHECK

/* Consistency check: report any TB in the physical hash table whose pc
   range still overlaps the (just invalidated) page containing 'address'. */
static void tb_invalidate_check(target_ulong address)
{
    TranslationBlock *tb;
    int i;
    address &= TARGET_PAGE_MASK;
    for(i = 0;i < CODE_GEN_PHYS_HASH_SIZE; i++) {
        for(tb = tb_phys_hash[i]; tb != NULL; tb = tb->phys_hash_next) {
            if (!(address + TARGET_PAGE_SIZE <= tb->pc ||
                  address >= tb->pc + tb->size)) {
                printf("ERROR invalidate: address=%08lx PC=%08lx size=%04x\n",
                       address, (long)tb->pc, tb->size);
            }
        }
    }
}

/* verify that all the pages have correct rights for code */
static void tb_page_check(void)
{
    TranslationBlock *tb;
    int i, flags1, flags2;

    for(i = 0;i < CODE_GEN_PHYS_HASH_SIZE; i++) {
        for(tb = tb_phys_hash[i]; tb != NULL; tb = tb->phys_hash_next) {
            flags1 = page_get_flags(tb->pc);
            flags2 = page_get_flags(tb->pc + tb->size - 1);
            /* pages holding translated code must not be writable */
            if ((flags1 & PAGE_WRITE) || (flags2 & PAGE_WRITE)) {
                printf("ERROR page flags: PC=%08lx size=%04x f1=%x f2=%x\n",
                       (long)tb->pc, tb->size, flags1, flags2);
            }
        }
    }
}

/* Verify that tb's circular jump list terminates back at tb itself.
   List links carry a 2-bit tag in their low bits; tag 2 marks the head. */
void tb_jmp_check(TranslationBlock *tb)
{
    TranslationBlock *tb1;
    unsigned int n1;

    /* suppress any remaining jumps to this TB */
    tb1 = tb->jmp_first;
    for(;;) {
        n1 = (long)tb1 & 3;
        tb1 = (TranslationBlock *)((long)tb1 & ~3);
        if (n1 == 2)
            break;
        tb1 = tb1->jmp_next[n1];
    }
    /* check end of list */
    if (tb1 != tb) {
        printf("ERROR: jmp_list from 0x%08lx\n", (long)tb);
    }
}

#endif
419fd6ce8f6Sbellard 
420fd6ce8f6Sbellard /* invalidate one TB */
421fd6ce8f6Sbellard static inline void tb_remove(TranslationBlock **ptb, TranslationBlock *tb,
422fd6ce8f6Sbellard                              int next_offset)
423fd6ce8f6Sbellard {
424fd6ce8f6Sbellard     TranslationBlock *tb1;
425fd6ce8f6Sbellard     for(;;) {
426fd6ce8f6Sbellard         tb1 = *ptb;
427fd6ce8f6Sbellard         if (tb1 == tb) {
428fd6ce8f6Sbellard             *ptb = *(TranslationBlock **)((char *)tb1 + next_offset);
429fd6ce8f6Sbellard             break;
430fd6ce8f6Sbellard         }
431fd6ce8f6Sbellard         ptb = (TranslationBlock **)((char *)tb1 + next_offset);
432fd6ce8f6Sbellard     }
433fd6ce8f6Sbellard }
434fd6ce8f6Sbellard 
4359fa3e853Sbellard static inline void tb_page_remove(TranslationBlock **ptb, TranslationBlock *tb)
4369fa3e853Sbellard {
4379fa3e853Sbellard     TranslationBlock *tb1;
4389fa3e853Sbellard     unsigned int n1;
4399fa3e853Sbellard 
4409fa3e853Sbellard     for(;;) {
4419fa3e853Sbellard         tb1 = *ptb;
4429fa3e853Sbellard         n1 = (long)tb1 & 3;
4439fa3e853Sbellard         tb1 = (TranslationBlock *)((long)tb1 & ~3);
4449fa3e853Sbellard         if (tb1 == tb) {
4459fa3e853Sbellard             *ptb = tb1->page_next[n1];
4469fa3e853Sbellard             break;
4479fa3e853Sbellard         }
4489fa3e853Sbellard         ptb = &tb1->page_next[n1];
4499fa3e853Sbellard     }
4509fa3e853Sbellard }
4519fa3e853Sbellard 
/* Remove jump slot 'n' of 'tb' from the circular list of TBs that jump to
   the same target.  Links carry a 2-bit tag in their low bits: the tag
   selects which jmp_next slot of the pointed-to TB continues the list,
   and tag 2 marks the list head (reached via jmp_first). */
static inline void tb_jmp_remove(TranslationBlock *tb, int n)
{
    TranslationBlock *tb1, **ptb;
    unsigned int n1;

    ptb = &tb->jmp_next[n];
    tb1 = *ptb;
    if (tb1) {
        /* find tb(n) in circular list */
        for(;;) {
            tb1 = *ptb;
            n1 = (long)tb1 & 3;
            tb1 = (TranslationBlock *)((long)tb1 & ~3);
            if (n1 == n && tb1 == tb)
                break;
            if (n1 == 2) {
                /* reached the list head: continue through jmp_first */
                ptb = &tb1->jmp_first;
            } else {
                ptb = &tb1->jmp_next[n1];
            }
        }
        /* now we can suppress tb(n) from the list */
        *ptb = tb->jmp_next[n];

        tb->jmp_next[n] = NULL;
    }
}
479d4e8164fSbellard 
/* reset the jump entry 'n' of a TB so that it is not chained to
   another TB */
static inline void tb_reset_jump(TranslationBlock *tb, int n)
{
    /* retarget the jump at the TB's own code at tb_next_offset[n] */
    tb_set_jmp_target(tb, n, (unsigned long)(tb->tc_ptr + tb->tb_next_offset[n]));
}
486d4e8164fSbellard 
/* Remove 'tb' from every structure that references it: the physical-pc
   hash table, the per-page TB lists, each CPU's tb_jmp_cache and the jump
   chains.  'page_addr' is the physical page the caller is iterating over;
   that page's own list entry is cleaned up by the caller, so it is
   deliberately skipped here. */
static inline void tb_phys_invalidate(TranslationBlock *tb, unsigned int page_addr)
{
    CPUState *env;
    PageDesc *p;
    unsigned int h, n1;
    target_ulong phys_pc;
    TranslationBlock *tb1, *tb2;

    /* remove the TB from the hash list */
    phys_pc = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
    h = tb_phys_hash_func(phys_pc);
    tb_remove(&tb_phys_hash[h], tb,
              offsetof(TranslationBlock, phys_hash_next));

    /* remove the TB from the page list */
    if (tb->page_addr[0] != page_addr) {
        p = page_find(tb->page_addr[0] >> TARGET_PAGE_BITS);
        tb_page_remove(&p->first_tb, tb);
        invalidate_page_bitmap(p);
    }
    /* a TB may span two pages; -1 means no second page */
    if (tb->page_addr[1] != -1 && tb->page_addr[1] != page_addr) {
        p = page_find(tb->page_addr[1] >> TARGET_PAGE_BITS);
        tb_page_remove(&p->first_tb, tb);
        invalidate_page_bitmap(p);
    }

    tb_invalidated_flag = 1;

    /* remove the TB from the hash list */
    h = tb_jmp_cache_hash_func(tb->pc);
    for(env = first_cpu; env != NULL; env = env->next_cpu) {
        if (env->tb_jmp_cache[h] == tb)
            env->tb_jmp_cache[h] = NULL;
    }

    /* suppress this TB from the two jump lists */
    tb_jmp_remove(tb, 0);
    tb_jmp_remove(tb, 1);

    /* suppress any remaining jumps to this TB */
    tb1 = tb->jmp_first;
    for(;;) {
        n1 = (long)tb1 & 3;
        if (n1 == 2)
            break;  /* tag 2 marks the end of the circular list */
        tb1 = (TranslationBlock *)((long)tb1 & ~3);
        tb2 = tb1->jmp_next[n1];
        tb_reset_jump(tb1, n1);
        tb1->jmp_next[n1] = NULL;
        tb1 = tb2;
    }
    tb->jmp_first = (TranslationBlock *)((long)tb | 2); /* fail safe */

    tb_phys_invalidate_count++;
}
5429fa3e853Sbellard 
/* Set bits [start, start+len) in the bitmap 'tab'.  Bits are LSB-first
   within each byte (bit i lives in byte i>>3).  len == 0 is a no-op. */
static inline void set_bits(uint8_t *tab, int start, int len)
{
    int end = start + len;
    uint8_t *p = tab + (start >> 3);
    int head_mask = 0xff << (start & 7);

    if ((start >> 3) == (end >> 3)) {
        /* the whole range fits inside one byte */
        if (start < end)
            *p |= head_mask & ~(0xff << (end & 7));
    } else {
        int full_end;

        /* partial head byte */
        *p++ |= head_mask;
        start = (start + 8) & ~7;
        /* run of complete bytes */
        for (full_end = end & ~7; start < full_end; start += 8)
            *p++ = 0xff;
        /* partial tail byte, if any */
        if (start < end)
            *p |= ~(0xff << (end & 7));
    }
}
5699fa3e853Sbellard 
/* Build the self-modifying-code bitmap for page 'p': one bit per byte of
   the page, set where translated code resides.  Called once a page has
   crossed SMC_BITMAP_USE_THRESHOLD write lookups. */
static void build_page_bitmap(PageDesc *p)
{
    int n, tb_start, tb_end;
    TranslationBlock *tb;

    p->code_bitmap = qemu_malloc(TARGET_PAGE_SIZE / 8);
    if (!p->code_bitmap)
        return;
    memset(p->code_bitmap, 0, TARGET_PAGE_SIZE / 8);

    /* walk the tagged list of TBs intersecting this page; the low 2 bits
       of each link say which of the TB's two pages this list entry is for */
    tb = p->first_tb;
    while (tb != NULL) {
        n = (long)tb & 3;
        tb = (TranslationBlock *)((long)tb & ~3);
        /* NOTE: this is subtle as a TB may span two physical pages */
        if (n == 0) {
            /* NOTE: tb_end may be after the end of the page, but
               it is not a problem */
            tb_start = tb->pc & ~TARGET_PAGE_MASK;
            tb_end = tb_start + tb->size;
            if (tb_end > TARGET_PAGE_SIZE)
                tb_end = TARGET_PAGE_SIZE;
        } else {
            /* second page of the TB: its portion starts at the page base */
            tb_start = 0;
            tb_end = ((tb->pc + tb->size) & ~TARGET_PAGE_MASK);
        }
        set_bits(p->code_bitmap, tb_start, tb_end - tb_start);
        tb = tb->page_next[n];
    }
}
6009fa3e853Sbellard 
#ifdef TARGET_HAS_PRECISE_SMC

/* Translate one TB starting at guest 'pc' and link it into the physical
   page tables.  If the code buffer is full, flush everything and retry
   (the second tb_alloc cannot fail after a flush). */
static void tb_gen_code(CPUState *env,
                        target_ulong pc, target_ulong cs_base, int flags,
                        int cflags)
{
    TranslationBlock *tb;
    uint8_t *tc_ptr;
    target_ulong phys_pc, phys_page2, virt_page2;
    int code_gen_size;

    phys_pc = get_phys_addr_code(env, pc);
    tb = tb_alloc(pc);
    if (!tb) {
        /* flush must be done */
        tb_flush(env);
        /* cannot fail at this point */
        tb = tb_alloc(pc);
    }
    tc_ptr = code_gen_ptr;
    tb->tc_ptr = tc_ptr;
    tb->cs_base = cs_base;
    tb->flags = flags;
    tb->cflags = cflags;
    cpu_gen_code(env, tb, CODE_GEN_MAX_SIZE, &code_gen_size);
    /* advance the buffer cursor, keeping CODE_GEN_ALIGN alignment */
    code_gen_ptr = (void *)(((unsigned long)code_gen_ptr + code_gen_size + CODE_GEN_ALIGN - 1) & ~(CODE_GEN_ALIGN - 1));

    /* check next page if needed */
    virt_page2 = (pc + tb->size - 1) & TARGET_PAGE_MASK;
    phys_page2 = -1;
    if ((pc & TARGET_PAGE_MASK) != virt_page2) {
        /* the TB spills into a second page: record its physical page too */
        phys_page2 = get_phys_addr_code(env, virt_page2);
    }
    tb_link_phys(tb, phys_pc, phys_page2);
}
#endif
637d720b93dSbellard 
/* invalidate all TBs which intersect with the target physical page
   starting in range [start;end[. NOTE: start and end must refer to
   the same physical page. 'is_cpu_write_access' should be true if called
   from a real cpu write access: the virtual CPU will exit the current
   TB if code is modified inside this TB. */
void tb_invalidate_phys_page_range(target_ulong start, target_ulong end,
                                   int is_cpu_write_access)
{
    int n, current_tb_modified, current_tb_not_found, current_flags;
    CPUState *env = cpu_single_env;
    PageDesc *p;
    TranslationBlock *tb, *tb_next, *current_tb, *saved_tb;
    target_ulong tb_start, tb_end;
    target_ulong current_pc, current_cs_base;

    p = page_find(start >> TARGET_PAGE_BITS);
    if (!p)
        return;
    /* after enough CPU-originated code writes to this page, build the
       per-page code bitmap so later small writes can be filtered cheaply
       (see tb_invalidate_phys_page_fast) */
    if (!p->code_bitmap &&
        ++p->code_write_count >= SMC_BITMAP_USE_THRESHOLD &&
        is_cpu_write_access) {
        /* build code bitmap */
        build_page_bitmap(p);
    }

    /* we remove all the TBs in the range [start, end[ */
    /* XXX: see if in some cases it could be faster to invalidate all the code */
    current_tb_not_found = is_cpu_write_access;
    current_tb_modified = 0;
    current_tb = NULL; /* avoid warning */
    current_pc = 0; /* avoid warning */
    current_cs_base = 0; /* avoid warning */
    current_flags = 0; /* avoid warning */
    tb = p->first_tb;
    while (tb != NULL) {
        /* low 2 bits of the list pointer encode which of the TB's two
           page slots links it into this page's list */
        n = (long)tb & 3;
        tb = (TranslationBlock *)((long)tb & ~3);
        tb_next = tb->page_next[n];
        /* NOTE: this is subtle as a TB may span two physical pages */
        if (n == 0) {
            /* NOTE: tb_end may be after the end of the page, but
               it is not a problem */
            tb_start = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
            tb_end = tb_start + tb->size;
        } else {
            tb_start = tb->page_addr[1];
            tb_end = tb_start + ((tb->pc + tb->size) & ~TARGET_PAGE_MASK);
        }
        /* invalidate only if [tb_start, tb_end[ overlaps [start, end[ */
        if (!(tb_end <= start || tb_start >= end)) {
#ifdef TARGET_HAS_PRECISE_SMC
            if (current_tb_not_found) {
                current_tb_not_found = 0;
                current_tb = NULL;
                if (env->mem_write_pc) {
                    /* now we have a real cpu fault */
                    current_tb = tb_find_pc(env->mem_write_pc);
                }
            }
            if (current_tb == tb &&
                !(current_tb->cflags & CF_SINGLE_INSN)) {
                /* If we are modifying the current TB, we must stop
                its execution. We could be more precise by checking
                that the modification is after the current PC, but it
                would require a specialized function to partially
                restore the CPU state */

                current_tb_modified = 1;
                cpu_restore_state(current_tb, env,
                                  env->mem_write_pc, NULL);
#if defined(TARGET_I386)
                current_flags = env->hflags;
                current_flags |= (env->eflags & (IOPL_MASK | TF_MASK | VM_MASK));
                current_cs_base = (target_ulong)env->segs[R_CS].base;
                current_pc = current_cs_base + env->eip;
#else
#error unsupported CPU
#endif
            }
#endif /* TARGET_HAS_PRECISE_SMC */
            /* we need to do that to handle the case where a signal
               occurs while doing tb_phys_invalidate() */
            saved_tb = NULL;
            if (env) {
                saved_tb = env->current_tb;
                env->current_tb = NULL;
            }
            tb_phys_invalidate(tb, -1);
            if (env) {
                env->current_tb = saved_tb;
                if (env->interrupt_request && env->current_tb)
                    cpu_interrupt(env, env->interrupt_request);
            }
        }
        tb = tb_next;
    }
#if !defined(CONFIG_USER_ONLY)
    /* if no code remaining, no need to continue to use slow writes */
    if (!p->first_tb) {
        invalidate_page_bitmap(p);
        if (is_cpu_write_access) {
            tlb_unprotect_code_phys(env, start, env->mem_write_vaddr);
        }
    }
#endif
#ifdef TARGET_HAS_PRECISE_SMC
    if (current_tb_modified) {
        /* we generate a block containing just the instruction
           modifying the memory. It will ensure that it cannot modify
           itself */
        env->current_tb = NULL;
        tb_gen_code(env, current_pc, current_cs_base, current_flags,
                    CF_SINGLE_INSN);
        cpu_resume_from_signal(env, NULL);
    }
#endif
}
7549fa3e853Sbellard 
7559fa3e853Sbellard /* len must be <= 8 and start must be a multiple of len */
756d720b93dSbellard static inline void tb_invalidate_phys_page_fast(target_ulong start, int len)
7579fa3e853Sbellard {
7589fa3e853Sbellard     PageDesc *p;
7599fa3e853Sbellard     int offset, b;
76059817ccbSbellard #if 0
761a4193c8aSbellard     if (1) {
762a4193c8aSbellard         if (loglevel) {
763a4193c8aSbellard             fprintf(logfile, "modifying code at 0x%x size=%d EIP=%x PC=%08x\n",
764a4193c8aSbellard                    cpu_single_env->mem_write_vaddr, len,
765a4193c8aSbellard                    cpu_single_env->eip,
766a4193c8aSbellard                    cpu_single_env->eip + (long)cpu_single_env->segs[R_CS].base);
767a4193c8aSbellard         }
76859817ccbSbellard     }
76959817ccbSbellard #endif
7709fa3e853Sbellard     p = page_find(start >> TARGET_PAGE_BITS);
7719fa3e853Sbellard     if (!p)
7729fa3e853Sbellard         return;
7739fa3e853Sbellard     if (p->code_bitmap) {
7749fa3e853Sbellard         offset = start & ~TARGET_PAGE_MASK;
7759fa3e853Sbellard         b = p->code_bitmap[offset >> 3] >> (offset & 7);
7769fa3e853Sbellard         if (b & ((1 << len) - 1))
7779fa3e853Sbellard             goto do_invalidate;
7789fa3e853Sbellard     } else {
7799fa3e853Sbellard     do_invalidate:
780d720b93dSbellard         tb_invalidate_phys_page_range(start, start + len, 1);
7819fa3e853Sbellard     }
7829fa3e853Sbellard }
7839fa3e853Sbellard 
7849fa3e853Sbellard #if !defined(CONFIG_SOFTMMU)
/* Invalidate every TB on the target page containing 'addr' (variant used
   when there is no software MMU).  'pc' is the host PC of the faulting
   write and 'puc' its signal context; with TARGET_HAS_PRECISE_SMC they
   let the currently executing TB be restarted when it wrote to its own
   code. */
static void tb_invalidate_phys_page(target_ulong addr,
                                    unsigned long pc, void *puc)
{
    int n, current_flags, current_tb_modified;
    target_ulong current_pc, current_cs_base;
    PageDesc *p;
    TranslationBlock *tb, *current_tb;
#ifdef TARGET_HAS_PRECISE_SMC
    CPUState *env = cpu_single_env;
#endif

    addr &= TARGET_PAGE_MASK;
    p = page_find(addr >> TARGET_PAGE_BITS);
    if (!p)
        return;
    tb = p->first_tb;
    current_tb_modified = 0;
    current_tb = NULL;
    current_pc = 0; /* avoid warning */
    current_cs_base = 0; /* avoid warning */
    current_flags = 0; /* avoid warning */
#ifdef TARGET_HAS_PRECISE_SMC
    if (tb && pc != 0) {
        current_tb = tb_find_pc(pc);
    }
#endif
    while (tb != NULL) {
        /* low 2 bits of the list pointer give the TB's page slot index */
        n = (long)tb & 3;
        tb = (TranslationBlock *)((long)tb & ~3);
#ifdef TARGET_HAS_PRECISE_SMC
        if (current_tb == tb &&
            !(current_tb->cflags & CF_SINGLE_INSN)) {
                /* If we are modifying the current TB, we must stop
                   its execution. We could be more precise by checking
                   that the modification is after the current PC, but it
                   would require a specialized function to partially
                   restore the CPU state */

            current_tb_modified = 1;
            cpu_restore_state(current_tb, env, pc, puc);
#if defined(TARGET_I386)
            current_flags = env->hflags;
            current_flags |= (env->eflags & (IOPL_MASK | TF_MASK | VM_MASK));
            current_cs_base = (target_ulong)env->segs[R_CS].base;
            current_pc = current_cs_base + env->eip;
#else
#error unsupported CPU
#endif
        }
#endif /* TARGET_HAS_PRECISE_SMC */
        tb_phys_invalidate(tb, addr);
        tb = tb->page_next[n];
    }
    /* the whole page list was invalidated above */
    p->first_tb = NULL;
#ifdef TARGET_HAS_PRECISE_SMC
    if (current_tb_modified) {
        /* we generate a block containing just the instruction
           modifying the memory. It will ensure that it cannot modify
           itself */
        env->current_tb = NULL;
        tb_gen_code(env, current_pc, current_cs_base, current_flags,
                    CF_SINGLE_INSN);
        cpu_resume_from_signal(env, puc);
    }
#endif
}
8519fa3e853Sbellard #endif
852fd6ce8f6Sbellard 
/* add the tb in the target page and protect it if necessary */
static inline void tb_alloc_page(TranslationBlock *tb,
                                 unsigned int n, target_ulong page_addr)
{
    PageDesc *p;
    TranslationBlock *last_first_tb;

    tb->page_addr[n] = page_addr;
    p = page_find_alloc(page_addr >> TARGET_PAGE_BITS);
    /* push the TB on the page's list; slot index n is encoded in the
       low bits of the stored pointer */
    tb->page_next[n] = p->first_tb;
    last_first_tb = p->first_tb;
    p->first_tb = (TranslationBlock *)((long)tb | n);
    invalidate_page_bitmap(p);

#if defined(TARGET_HAS_SMC) || 1

#if defined(CONFIG_USER_ONLY)
    if (p->flags & PAGE_WRITE) {
        target_ulong addr;
        PageDesc *p2;
        int prot;

        /* force the host page as non writable (writes will have a
           page fault + mprotect overhead) */
        page_addr &= qemu_host_page_mask;
        prot = 0;
        /* a host page may cover several target pages: accumulate their
           protection flags and clear PAGE_WRITE on each of them */
        for(addr = page_addr; addr < page_addr + qemu_host_page_size;
            addr += TARGET_PAGE_SIZE) {

            p2 = page_find (addr >> TARGET_PAGE_BITS);
            if (!p2)
                continue;
            prot |= p2->flags;
            p2->flags &= ~PAGE_WRITE;
            page_get_flags(addr);
          }
        mprotect(g2h(page_addr), qemu_host_page_size,
                 (prot & PAGE_BITS) & ~PAGE_WRITE);
#ifdef DEBUG_TB_INVALIDATE
        printf("protecting code page: 0x%08lx\n",
               page_addr);
#endif
    }
#else
    /* if some code is already present, then the pages are already
       protected. So we handle the case where only the first TB is
       allocated in a physical page */
    if (!last_first_tb) {
        tlb_protect_code(page_addr);
    }
#endif

#endif /* TARGET_HAS_SMC */
}
907fd6ce8f6Sbellard 
908fd6ce8f6Sbellard /* Allocate a new translation block. Flush the translation buffer if
909fd6ce8f6Sbellard    too many translation blocks or too much generated code. */
910c27004ecSbellard TranslationBlock *tb_alloc(target_ulong pc)
911fd6ce8f6Sbellard {
912fd6ce8f6Sbellard     TranslationBlock *tb;
913fd6ce8f6Sbellard 
914fd6ce8f6Sbellard     if (nb_tbs >= CODE_GEN_MAX_BLOCKS ||
915fd6ce8f6Sbellard         (code_gen_ptr - code_gen_buffer) >= CODE_GEN_BUFFER_MAX_SIZE)
916d4e8164fSbellard         return NULL;
917fd6ce8f6Sbellard     tb = &tbs[nb_tbs++];
918fd6ce8f6Sbellard     tb->pc = pc;
919b448f2f3Sbellard     tb->cflags = 0;
920d4e8164fSbellard     return tb;
921d4e8164fSbellard }
922d4e8164fSbellard 
/* add a new TB and link it to the physical page tables. phys_page2 is
   (-1) to indicate that only one page contains the TB. */
void tb_link_phys(TranslationBlock *tb,
                  target_ulong phys_pc, target_ulong phys_page2)
{
    unsigned int h;
    TranslationBlock **ptb;

    /* add in the physical hash table */
    h = tb_phys_hash_func(phys_pc);
    ptb = &tb_phys_hash[h];
    tb->phys_hash_next = *ptb;
    *ptb = tb;

    /* add in the page list */
    tb_alloc_page(tb, 0, phys_pc & TARGET_PAGE_MASK);
    if (phys_page2 != -1)
        tb_alloc_page(tb, 1, phys_page2);
    else
        tb->page_addr[1] = -1;

    /* the value 2 in the low bits marks the TB itself as the head of
       its incoming-jump list */
    tb->jmp_first = (TranslationBlock *)((long)tb | 2);
    tb->jmp_next[0] = NULL;
    tb->jmp_next[1] = NULL;
#ifdef USE_CODE_COPY
    tb->cflags &= ~CF_FP_USED;
    if (tb->cflags & CF_TB_FP_USED)
        tb->cflags |= CF_FP_USED;
#endif

    /* init original jump addresses */
    /* offset 0xffff means the jump slot is unused */
    if (tb->tb_next_offset[0] != 0xffff)
        tb_reset_jump(tb, 0);
    if (tb->tb_next_offset[1] != 0xffff)
        tb_reset_jump(tb, 1);

#ifdef DEBUG_TB_CHECK
    tb_page_check();
#endif
}
963fd6ce8f6Sbellard 
964a513fe19Sbellard /* find the TB 'tb' such that tb[0].tc_ptr <= tc_ptr <
965a513fe19Sbellard    tb[1].tc_ptr. Return NULL if not found */
966a513fe19Sbellard TranslationBlock *tb_find_pc(unsigned long tc_ptr)
967a513fe19Sbellard {
968a513fe19Sbellard     int m_min, m_max, m;
969a513fe19Sbellard     unsigned long v;
970a513fe19Sbellard     TranslationBlock *tb;
971a513fe19Sbellard 
972a513fe19Sbellard     if (nb_tbs <= 0)
973a513fe19Sbellard         return NULL;
974a513fe19Sbellard     if (tc_ptr < (unsigned long)code_gen_buffer ||
975a513fe19Sbellard         tc_ptr >= (unsigned long)code_gen_ptr)
976a513fe19Sbellard         return NULL;
977a513fe19Sbellard     /* binary search (cf Knuth) */
978a513fe19Sbellard     m_min = 0;
979a513fe19Sbellard     m_max = nb_tbs - 1;
980a513fe19Sbellard     while (m_min <= m_max) {
981a513fe19Sbellard         m = (m_min + m_max) >> 1;
982a513fe19Sbellard         tb = &tbs[m];
983a513fe19Sbellard         v = (unsigned long)tb->tc_ptr;
984a513fe19Sbellard         if (v == tc_ptr)
985a513fe19Sbellard             return tb;
986a513fe19Sbellard         else if (tc_ptr < v) {
987a513fe19Sbellard             m_max = m - 1;
988a513fe19Sbellard         } else {
989a513fe19Sbellard             m_min = m + 1;
990a513fe19Sbellard         }
991a513fe19Sbellard     }
992a513fe19Sbellard     return &tbs[m_max];
993a513fe19Sbellard }
9947501267eSbellard 
995ea041c0eSbellard static void tb_reset_jump_recursive(TranslationBlock *tb);
996ea041c0eSbellard 
997ea041c0eSbellard static inline void tb_reset_jump_recursive2(TranslationBlock *tb, int n)
998ea041c0eSbellard {
999ea041c0eSbellard     TranslationBlock *tb1, *tb_next, **ptb;
1000ea041c0eSbellard     unsigned int n1;
1001ea041c0eSbellard 
1002ea041c0eSbellard     tb1 = tb->jmp_next[n];
1003ea041c0eSbellard     if (tb1 != NULL) {
1004ea041c0eSbellard         /* find head of list */
1005ea041c0eSbellard         for(;;) {
1006ea041c0eSbellard             n1 = (long)tb1 & 3;
1007ea041c0eSbellard             tb1 = (TranslationBlock *)((long)tb1 & ~3);
1008ea041c0eSbellard             if (n1 == 2)
1009ea041c0eSbellard                 break;
1010ea041c0eSbellard             tb1 = tb1->jmp_next[n1];
1011ea041c0eSbellard         }
1012ea041c0eSbellard         /* we are now sure now that tb jumps to tb1 */
1013ea041c0eSbellard         tb_next = tb1;
1014ea041c0eSbellard 
1015ea041c0eSbellard         /* remove tb from the jmp_first list */
1016ea041c0eSbellard         ptb = &tb_next->jmp_first;
1017ea041c0eSbellard         for(;;) {
1018ea041c0eSbellard             tb1 = *ptb;
1019ea041c0eSbellard             n1 = (long)tb1 & 3;
1020ea041c0eSbellard             tb1 = (TranslationBlock *)((long)tb1 & ~3);
1021ea041c0eSbellard             if (n1 == n && tb1 == tb)
1022ea041c0eSbellard                 break;
1023ea041c0eSbellard             ptb = &tb1->jmp_next[n1];
1024ea041c0eSbellard         }
1025ea041c0eSbellard         *ptb = tb->jmp_next[n];
1026ea041c0eSbellard         tb->jmp_next[n] = NULL;
1027ea041c0eSbellard 
1028ea041c0eSbellard         /* suppress the jump to next tb in generated code */
1029ea041c0eSbellard         tb_reset_jump(tb, n);
1030ea041c0eSbellard 
10310124311eSbellard         /* suppress jumps in the tb on which we could have jumped */
1032ea041c0eSbellard         tb_reset_jump_recursive(tb_next);
1033ea041c0eSbellard     }
1034ea041c0eSbellard }
1035ea041c0eSbellard 
1036ea041c0eSbellard static void tb_reset_jump_recursive(TranslationBlock *tb)
1037ea041c0eSbellard {
1038ea041c0eSbellard     tb_reset_jump_recursive2(tb, 0);
1039ea041c0eSbellard     tb_reset_jump_recursive2(tb, 1);
1040ea041c0eSbellard }
1041ea041c0eSbellard 
10421fddef4bSbellard #if defined(TARGET_HAS_ICE)
1043d720b93dSbellard static void breakpoint_invalidate(CPUState *env, target_ulong pc)
1044d720b93dSbellard {
10459b3c35e0Sj_mayer     target_phys_addr_t addr;
10469b3c35e0Sj_mayer     target_ulong pd;
1047c2f07f81Spbrook     ram_addr_t ram_addr;
1048c2f07f81Spbrook     PhysPageDesc *p;
1049d720b93dSbellard 
1050c2f07f81Spbrook     addr = cpu_get_phys_page_debug(env, pc);
1051c2f07f81Spbrook     p = phys_page_find(addr >> TARGET_PAGE_BITS);
1052c2f07f81Spbrook     if (!p) {
1053c2f07f81Spbrook         pd = IO_MEM_UNASSIGNED;
1054c2f07f81Spbrook     } else {
1055c2f07f81Spbrook         pd = p->phys_offset;
1056c2f07f81Spbrook     }
1057c2f07f81Spbrook     ram_addr = (pd & TARGET_PAGE_MASK) | (pc & ~TARGET_PAGE_MASK);
1058706cd4b5Spbrook     tb_invalidate_phys_page_range(ram_addr, ram_addr + 1, 0);
1059d720b93dSbellard }
1060c27004ecSbellard #endif
1061d720b93dSbellard 
10626658ffb8Spbrook /* Add a watchpoint.  */
10636658ffb8Spbrook int  cpu_watchpoint_insert(CPUState *env, target_ulong addr)
10646658ffb8Spbrook {
10656658ffb8Spbrook     int i;
10666658ffb8Spbrook 
10676658ffb8Spbrook     for (i = 0; i < env->nb_watchpoints; i++) {
10686658ffb8Spbrook         if (addr == env->watchpoint[i].vaddr)
10696658ffb8Spbrook             return 0;
10706658ffb8Spbrook     }
10716658ffb8Spbrook     if (env->nb_watchpoints >= MAX_WATCHPOINTS)
10726658ffb8Spbrook         return -1;
10736658ffb8Spbrook 
10746658ffb8Spbrook     i = env->nb_watchpoints++;
10756658ffb8Spbrook     env->watchpoint[i].vaddr = addr;
10766658ffb8Spbrook     tlb_flush_page(env, addr);
10776658ffb8Spbrook     /* FIXME: This flush is needed because of the hack to make memory ops
10786658ffb8Spbrook        terminate the TB.  It can be removed once the proper IO trap and
10796658ffb8Spbrook        re-execute bits are in.  */
10806658ffb8Spbrook     tb_flush(env);
10816658ffb8Spbrook     return i;
10826658ffb8Spbrook }
10836658ffb8Spbrook 
10846658ffb8Spbrook /* Remove a watchpoint.  */
10856658ffb8Spbrook int cpu_watchpoint_remove(CPUState *env, target_ulong addr)
10866658ffb8Spbrook {
10876658ffb8Spbrook     int i;
10886658ffb8Spbrook 
10896658ffb8Spbrook     for (i = 0; i < env->nb_watchpoints; i++) {
10906658ffb8Spbrook         if (addr == env->watchpoint[i].vaddr) {
10916658ffb8Spbrook             env->nb_watchpoints--;
10926658ffb8Spbrook             env->watchpoint[i] = env->watchpoint[env->nb_watchpoints];
10936658ffb8Spbrook             tlb_flush_page(env, addr);
10946658ffb8Spbrook             return 0;
10956658ffb8Spbrook         }
10966658ffb8Spbrook     }
10976658ffb8Spbrook     return -1;
10986658ffb8Spbrook }
10996658ffb8Spbrook 
1100c33a346eSbellard /* add a breakpoint. EXCP_DEBUG is returned by the CPU loop if a
1101c33a346eSbellard    breakpoint is reached */
11022e12669aSbellard int cpu_breakpoint_insert(CPUState *env, target_ulong pc)
11034c3a88a2Sbellard {
11041fddef4bSbellard #if defined(TARGET_HAS_ICE)
11054c3a88a2Sbellard     int i;
11064c3a88a2Sbellard 
11074c3a88a2Sbellard     for(i = 0; i < env->nb_breakpoints; i++) {
11084c3a88a2Sbellard         if (env->breakpoints[i] == pc)
11094c3a88a2Sbellard             return 0;
11104c3a88a2Sbellard     }
11114c3a88a2Sbellard 
11124c3a88a2Sbellard     if (env->nb_breakpoints >= MAX_BREAKPOINTS)
11134c3a88a2Sbellard         return -1;
11144c3a88a2Sbellard     env->breakpoints[env->nb_breakpoints++] = pc;
1115d720b93dSbellard 
1116d720b93dSbellard     breakpoint_invalidate(env, pc);
11174c3a88a2Sbellard     return 0;
11184c3a88a2Sbellard #else
11194c3a88a2Sbellard     return -1;
11204c3a88a2Sbellard #endif
11214c3a88a2Sbellard }
11224c3a88a2Sbellard 
11234c3a88a2Sbellard /* remove a breakpoint */
11242e12669aSbellard int cpu_breakpoint_remove(CPUState *env, target_ulong pc)
11254c3a88a2Sbellard {
11261fddef4bSbellard #if defined(TARGET_HAS_ICE)
11274c3a88a2Sbellard     int i;
11284c3a88a2Sbellard     for(i = 0; i < env->nb_breakpoints; i++) {
11294c3a88a2Sbellard         if (env->breakpoints[i] == pc)
11304c3a88a2Sbellard             goto found;
11314c3a88a2Sbellard     }
11324c3a88a2Sbellard     return -1;
11334c3a88a2Sbellard  found:
11344c3a88a2Sbellard     env->nb_breakpoints--;
11351fddef4bSbellard     if (i < env->nb_breakpoints)
11361fddef4bSbellard       env->breakpoints[i] = env->breakpoints[env->nb_breakpoints];
1137d720b93dSbellard 
1138d720b93dSbellard     breakpoint_invalidate(env, pc);
11394c3a88a2Sbellard     return 0;
11404c3a88a2Sbellard #else
11414c3a88a2Sbellard     return -1;
11424c3a88a2Sbellard #endif
11434c3a88a2Sbellard }
11444c3a88a2Sbellard 
1145c33a346eSbellard /* enable or disable single step mode. EXCP_DEBUG is returned by the
1146c33a346eSbellard    CPU loop after each instruction */
1147c33a346eSbellard void cpu_single_step(CPUState *env, int enabled)
1148c33a346eSbellard {
11491fddef4bSbellard #if defined(TARGET_HAS_ICE)
1150c33a346eSbellard     if (env->singlestep_enabled != enabled) {
1151c33a346eSbellard         env->singlestep_enabled = enabled;
1152c33a346eSbellard         /* must flush all the translated code to avoid inconsistancies */
11539fa3e853Sbellard         /* XXX: only flush what is necessary */
11540124311eSbellard         tb_flush(env);
1155c33a346eSbellard     }
1156c33a346eSbellard #endif
1157c33a346eSbellard }
1158c33a346eSbellard 
/* enable or disable low levels log */
/* Sets the global log mask; lazily opens the log file on first enable
   and closes it when all logging is turned off. */
void cpu_set_log(int log_flags)
{
    loglevel = log_flags;
    if (loglevel && !logfile) {
        logfile = fopen(logfilename, log_append ? "a" : "w");
        if (!logfile) {
            perror(logfilename);
            _exit(1);
        }
#if !defined(CONFIG_SOFTMMU)
        /* must avoid mmap() usage of glibc by setting a buffer "by hand" */
        {
            static uint8_t logfile_buf[4096];
            setvbuf(logfile, logfile_buf, _IOLBF, sizeof(logfile_buf));
        }
#else
        setvbuf(logfile, NULL, _IOLBF, 0);
#endif
        /* once the file has been truncated, later reopens must append so
           earlier output is not lost */
        log_append = 1;
    }
    if (!loglevel && logfile) {
        fclose(logfile);
        logfile = NULL;
    }
}
118534865134Sbellard 
118634865134Sbellard void cpu_set_log_filename(const char *filename)
118734865134Sbellard {
118834865134Sbellard     logfilename = strdup(filename);
1189e735b91cSpbrook     if (logfile) {
1190e735b91cSpbrook         fclose(logfile);
1191e735b91cSpbrook         logfile = NULL;
1192e735b91cSpbrook     }
1193e735b91cSpbrook     cpu_set_log(loglevel);
119434865134Sbellard }
1195c33a346eSbellard 
/* mask must never be zero, except for A20 change call */
void cpu_interrupt(CPUState *env, int mask)
{
    TranslationBlock *tb;
    static int interrupt_lock;

    env->interrupt_request |= mask;
    /* if the cpu is currently executing code, we must unlink it and
       all the potentially executing TB */
    tb = env->current_tb;
    /* NOTE(review): testandset() is used as a try-lock; when the lock is
       already held the unlinking is simply skipped, presumably because
       another context is doing it — confirm against callers. */
    if (tb && !testandset(&interrupt_lock)) {
        env->current_tb = NULL;
        tb_reset_jump_recursive(tb);
        interrupt_lock = 0;
    }
}
1212ea041c0eSbellard 
/* Clear the given interrupt-request bits; unlike cpu_interrupt() this
   does not unlink any translation blocks. */
void cpu_reset_interrupt(CPUState *env, int mask)
{
    env->interrupt_request &= ~mask;
}
1217b54ad049Sbellard 
/* Table mapping log-category mask bits to the names accepted by
   cpu_str_to_log_mask(); terminated by an entry with mask == 0. */
CPULogItem cpu_log_items[] = {
    { CPU_LOG_TB_OUT_ASM, "out_asm",
      "show generated host assembly code for each compiled TB" },
    { CPU_LOG_TB_IN_ASM, "in_asm",
      "show target assembly code for each compiled TB" },
    { CPU_LOG_TB_OP, "op",
      "show micro ops for each compiled TB (only usable if 'in_asm' used)" },
#ifdef TARGET_I386
    { CPU_LOG_TB_OP_OPT, "op_opt",
      "show micro ops after optimization for each compiled TB" },
#endif
    { CPU_LOG_INT, "int",
      "show interrupts/exceptions in short format" },
    { CPU_LOG_EXEC, "exec",
      "show trace before each executed TB (lots of logs)" },
    { CPU_LOG_TB_CPU, "cpu",
      "show CPU state before block translation" },
#ifdef TARGET_I386
    { CPU_LOG_PCALL, "pcall",
      "show protected mode far calls/returns/exceptions" },
#endif
#ifdef DEBUG_IOPORT
    { CPU_LOG_IOPORT, "ioport",
      "show all i/o ports accesses" },
#endif
    { 0, NULL, NULL },
};
1245f193c797Sbellard 
/* Return non-zero when s2 is exactly the n-character string starting at
   s1 (s1 need not be NUL-terminated after n characters). */
static int cmp1(const char *s1, int n, const char *s2)
{
    return strlen(s2) == (size_t)n && memcmp(s1, s2, n) == 0;
}
1252f193c797Sbellard 
1253f193c797Sbellard /* takes a comma separated list of log masks. Return 0 if error. */
1254f193c797Sbellard int cpu_str_to_log_mask(const char *str)
1255f193c797Sbellard {
1256f193c797Sbellard     CPULogItem *item;
1257f193c797Sbellard     int mask;
1258f193c797Sbellard     const char *p, *p1;
1259f193c797Sbellard 
1260f193c797Sbellard     p = str;
1261f193c797Sbellard     mask = 0;
1262f193c797Sbellard     for(;;) {
1263f193c797Sbellard         p1 = strchr(p, ',');
1264f193c797Sbellard         if (!p1)
1265f193c797Sbellard             p1 = p + strlen(p);
12668e3a9fd2Sbellard 	if(cmp1(p,p1-p,"all")) {
12678e3a9fd2Sbellard 		for(item = cpu_log_items; item->mask != 0; item++) {
12688e3a9fd2Sbellard 			mask |= item->mask;
12698e3a9fd2Sbellard 		}
12708e3a9fd2Sbellard 	} else {
1271f193c797Sbellard         for(item = cpu_log_items; item->mask != 0; item++) {
1272f193c797Sbellard             if (cmp1(p, p1 - p, item->name))
1273f193c797Sbellard                 goto found;
1274f193c797Sbellard         }
1275f193c797Sbellard         return 0;
12768e3a9fd2Sbellard 	}
1277f193c797Sbellard     found:
1278f193c797Sbellard         mask |= item->mask;
1279f193c797Sbellard         if (*p1 != ',')
1280f193c797Sbellard             break;
1281f193c797Sbellard         p = p1 + 1;
1282f193c797Sbellard     }
1283f193c797Sbellard     return mask;
1284f193c797Sbellard }
1285ea041c0eSbellard 
/* Print a fatal error message plus the CPU state on stderr, flush and
   close the log file, then abort().  Never returns. */
void cpu_abort(CPUState *env, const char *fmt, ...)
{
    va_list ap;

    va_start(ap, fmt);
    fprintf(stderr, "qemu: fatal: ");
    vfprintf(stderr, fmt, ap);
    fprintf(stderr, "\n");
#ifdef TARGET_I386
    cpu_dump_state(env, stderr, fprintf, X86_DUMP_FPU | X86_DUMP_CCOP);
#else
    cpu_dump_state(env, stderr, fprintf, 0);
#endif
    va_end(ap);
    /* make sure buffered log output reaches disk before aborting */
    if (logfile) {
        fflush(logfile);
        fclose(logfile);
    }
    abort();
}
13067501267eSbellard 
1307c5be9f08Sths CPUState *cpu_copy(CPUState *env)
1308c5be9f08Sths {
1309c5be9f08Sths     CPUState *new_env = cpu_init();
1310c5be9f08Sths     /* preserve chaining and index */
1311c5be9f08Sths     CPUState *next_cpu = new_env->next_cpu;
1312c5be9f08Sths     int cpu_index = new_env->cpu_index;
1313c5be9f08Sths     memcpy(new_env, env, sizeof(CPUState));
1314c5be9f08Sths     new_env->next_cpu = next_cpu;
1315c5be9f08Sths     new_env->cpu_index = cpu_index;
1316c5be9f08Sths     return new_env;
1317c5be9f08Sths }
1318c5be9f08Sths 
13190124311eSbellard #if !defined(CONFIG_USER_ONLY)
13200124311eSbellard 
1321ee8b7021Sbellard /* NOTE: if flush_global is true, also flush global entries (not
1322ee8b7021Sbellard    implemented yet) */
1323ee8b7021Sbellard void tlb_flush(CPUState *env, int flush_global)
132433417e70Sbellard {
132533417e70Sbellard     int i;
13260124311eSbellard 
13279fa3e853Sbellard #if defined(DEBUG_TLB)
13289fa3e853Sbellard     printf("tlb_flush:\n");
13299fa3e853Sbellard #endif
13300124311eSbellard     /* must reset current TB so that interrupts cannot modify the
13310124311eSbellard        links while we are modifying them */
13320124311eSbellard     env->current_tb = NULL;
13330124311eSbellard 
133433417e70Sbellard     for(i = 0; i < CPU_TLB_SIZE; i++) {
133584b7b8e7Sbellard         env->tlb_table[0][i].addr_read = -1;
133684b7b8e7Sbellard         env->tlb_table[0][i].addr_write = -1;
133784b7b8e7Sbellard         env->tlb_table[0][i].addr_code = -1;
133884b7b8e7Sbellard         env->tlb_table[1][i].addr_read = -1;
133984b7b8e7Sbellard         env->tlb_table[1][i].addr_write = -1;
134084b7b8e7Sbellard         env->tlb_table[1][i].addr_code = -1;
13416fa4cea9Sj_mayer #if (NB_MMU_MODES >= 3)
13426fa4cea9Sj_mayer         env->tlb_table[2][i].addr_read = -1;
13436fa4cea9Sj_mayer         env->tlb_table[2][i].addr_write = -1;
13446fa4cea9Sj_mayer         env->tlb_table[2][i].addr_code = -1;
13456fa4cea9Sj_mayer #if (NB_MMU_MODES == 4)
13466fa4cea9Sj_mayer         env->tlb_table[3][i].addr_read = -1;
13476fa4cea9Sj_mayer         env->tlb_table[3][i].addr_write = -1;
13486fa4cea9Sj_mayer         env->tlb_table[3][i].addr_code = -1;
13496fa4cea9Sj_mayer #endif
13506fa4cea9Sj_mayer #endif
135133417e70Sbellard     }
13529fa3e853Sbellard 
13538a40a180Sbellard     memset (env->tb_jmp_cache, 0, TB_JMP_CACHE_SIZE * sizeof (void *));
13549fa3e853Sbellard 
13559fa3e853Sbellard #if !defined(CONFIG_SOFTMMU)
13569fa3e853Sbellard     munmap((void *)MMAP_AREA_START, MMAP_AREA_END - MMAP_AREA_START);
13579fa3e853Sbellard #endif
13580a962c02Sbellard #ifdef USE_KQEMU
13590a962c02Sbellard     if (env->kqemu_enabled) {
13600a962c02Sbellard         kqemu_flush(env, flush_global);
13610a962c02Sbellard     }
13620a962c02Sbellard #endif
1363e3db7226Sbellard     tlb_flush_count++;
136433417e70Sbellard }
136533417e70Sbellard 
1366274da6b2Sbellard static inline void tlb_flush_entry(CPUTLBEntry *tlb_entry, target_ulong addr)
136761382a50Sbellard {
136884b7b8e7Sbellard     if (addr == (tlb_entry->addr_read &
136984b7b8e7Sbellard                  (TARGET_PAGE_MASK | TLB_INVALID_MASK)) ||
137084b7b8e7Sbellard         addr == (tlb_entry->addr_write &
137184b7b8e7Sbellard                  (TARGET_PAGE_MASK | TLB_INVALID_MASK)) ||
137284b7b8e7Sbellard         addr == (tlb_entry->addr_code &
137384b7b8e7Sbellard                  (TARGET_PAGE_MASK | TLB_INVALID_MASK))) {
137484b7b8e7Sbellard         tlb_entry->addr_read = -1;
137584b7b8e7Sbellard         tlb_entry->addr_write = -1;
137684b7b8e7Sbellard         tlb_entry->addr_code = -1;
137784b7b8e7Sbellard     }
137861382a50Sbellard }
137961382a50Sbellard 
13802e12669aSbellard void tlb_flush_page(CPUState *env, target_ulong addr)
138133417e70Sbellard {
13828a40a180Sbellard     int i;
13839fa3e853Sbellard     TranslationBlock *tb;
13840124311eSbellard 
13859fa3e853Sbellard #if defined(DEBUG_TLB)
1386108c49b8Sbellard     printf("tlb_flush_page: " TARGET_FMT_lx "\n", addr);
13879fa3e853Sbellard #endif
13880124311eSbellard     /* must reset current TB so that interrupts cannot modify the
13890124311eSbellard        links while we are modifying them */
13900124311eSbellard     env->current_tb = NULL;
139133417e70Sbellard 
139261382a50Sbellard     addr &= TARGET_PAGE_MASK;
139333417e70Sbellard     i = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
139484b7b8e7Sbellard     tlb_flush_entry(&env->tlb_table[0][i], addr);
139584b7b8e7Sbellard     tlb_flush_entry(&env->tlb_table[1][i], addr);
13966fa4cea9Sj_mayer #if (NB_MMU_MODES >= 3)
13976fa4cea9Sj_mayer     tlb_flush_entry(&env->tlb_table[2][i], addr);
13986fa4cea9Sj_mayer #if (NB_MMU_MODES == 4)
13996fa4cea9Sj_mayer     tlb_flush_entry(&env->tlb_table[3][i], addr);
14006fa4cea9Sj_mayer #endif
14016fa4cea9Sj_mayer #endif
14020124311eSbellard 
1403b362e5e0Spbrook     /* Discard jump cache entries for any tb which might potentially
1404b362e5e0Spbrook        overlap the flushed page.  */
1405b362e5e0Spbrook     i = tb_jmp_cache_hash_page(addr - TARGET_PAGE_SIZE);
1406b362e5e0Spbrook     memset (&env->tb_jmp_cache[i], 0, TB_JMP_PAGE_SIZE * sizeof(tb));
1407b362e5e0Spbrook 
1408b362e5e0Spbrook     i = tb_jmp_cache_hash_page(addr);
1409b362e5e0Spbrook     memset (&env->tb_jmp_cache[i], 0, TB_JMP_PAGE_SIZE * sizeof(tb));
141061382a50Sbellard 
14119fa3e853Sbellard #if !defined(CONFIG_SOFTMMU)
14129fa3e853Sbellard     if (addr < MMAP_AREA_END)
14139fa3e853Sbellard         munmap((void *)addr, TARGET_PAGE_SIZE);
14149fa3e853Sbellard #endif
14150a962c02Sbellard #ifdef USE_KQEMU
14160a962c02Sbellard     if (env->kqemu_enabled) {
14170a962c02Sbellard         kqemu_flush_page(env, addr);
14180a962c02Sbellard     }
14190a962c02Sbellard #endif
14209fa3e853Sbellard }
14219fa3e853Sbellard 
14229fa3e853Sbellard /* update the TLBs so that writes to code in the virtual page 'addr'
14239fa3e853Sbellard    can be detected */
14246a00d601Sbellard static void tlb_protect_code(ram_addr_t ram_addr)
142561382a50Sbellard {
14266a00d601Sbellard     cpu_physical_memory_reset_dirty(ram_addr,
14276a00d601Sbellard                                     ram_addr + TARGET_PAGE_SIZE,
14286a00d601Sbellard                                     CODE_DIRTY_FLAG);
14299fa3e853Sbellard }
14309fa3e853Sbellard 
14319fa3e853Sbellard /* update the TLB so that writes in physical page 'phys_addr' are no longer
14323a7d929eSbellard    tested for self modifying code */
14333a7d929eSbellard static void tlb_unprotect_code_phys(CPUState *env, ram_addr_t ram_addr,
14343a7d929eSbellard                                     target_ulong vaddr)
14359fa3e853Sbellard {
14363a7d929eSbellard     phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] |= CODE_DIRTY_FLAG;
14379fa3e853Sbellard }
14389fa3e853Sbellard 
14391ccde1cbSbellard static inline void tlb_reset_dirty_range(CPUTLBEntry *tlb_entry,
14401ccde1cbSbellard                                          unsigned long start, unsigned long length)
14411ccde1cbSbellard {
14421ccde1cbSbellard     unsigned long addr;
144384b7b8e7Sbellard     if ((tlb_entry->addr_write & ~TARGET_PAGE_MASK) == IO_MEM_RAM) {
144484b7b8e7Sbellard         addr = (tlb_entry->addr_write & TARGET_PAGE_MASK) + tlb_entry->addend;
14451ccde1cbSbellard         if ((addr - start) < length) {
144684b7b8e7Sbellard             tlb_entry->addr_write = (tlb_entry->addr_write & TARGET_PAGE_MASK) | IO_MEM_NOTDIRTY;
14471ccde1cbSbellard         }
14481ccde1cbSbellard     }
14491ccde1cbSbellard }
14501ccde1cbSbellard 
14513a7d929eSbellard void cpu_physical_memory_reset_dirty(ram_addr_t start, ram_addr_t end,
14520a962c02Sbellard                                      int dirty_flags)
14531ccde1cbSbellard {
14541ccde1cbSbellard     CPUState *env;
14554f2ac237Sbellard     unsigned long length, start1;
14560a962c02Sbellard     int i, mask, len;
14570a962c02Sbellard     uint8_t *p;
14581ccde1cbSbellard 
14591ccde1cbSbellard     start &= TARGET_PAGE_MASK;
14601ccde1cbSbellard     end = TARGET_PAGE_ALIGN(end);
14611ccde1cbSbellard 
14621ccde1cbSbellard     length = end - start;
14631ccde1cbSbellard     if (length == 0)
14641ccde1cbSbellard         return;
14650a962c02Sbellard     len = length >> TARGET_PAGE_BITS;
14663a7d929eSbellard #ifdef USE_KQEMU
14676a00d601Sbellard     /* XXX: should not depend on cpu context */
14686a00d601Sbellard     env = first_cpu;
14693a7d929eSbellard     if (env->kqemu_enabled) {
1470f23db169Sbellard         ram_addr_t addr;
1471f23db169Sbellard         addr = start;
1472f23db169Sbellard         for(i = 0; i < len; i++) {
1473f23db169Sbellard             kqemu_set_notdirty(env, addr);
1474f23db169Sbellard             addr += TARGET_PAGE_SIZE;
1475f23db169Sbellard         }
14763a7d929eSbellard     }
14773a7d929eSbellard #endif
1478f23db169Sbellard     mask = ~dirty_flags;
1479f23db169Sbellard     p = phys_ram_dirty + (start >> TARGET_PAGE_BITS);
1480f23db169Sbellard     for(i = 0; i < len; i++)
1481f23db169Sbellard         p[i] &= mask;
1482f23db169Sbellard 
14831ccde1cbSbellard     /* we modify the TLB cache so that the dirty bit will be set again
14841ccde1cbSbellard        when accessing the range */
148559817ccbSbellard     start1 = start + (unsigned long)phys_ram_base;
14866a00d601Sbellard     for(env = first_cpu; env != NULL; env = env->next_cpu) {
14871ccde1cbSbellard         for(i = 0; i < CPU_TLB_SIZE; i++)
148884b7b8e7Sbellard             tlb_reset_dirty_range(&env->tlb_table[0][i], start1, length);
14891ccde1cbSbellard         for(i = 0; i < CPU_TLB_SIZE; i++)
149084b7b8e7Sbellard             tlb_reset_dirty_range(&env->tlb_table[1][i], start1, length);
14916fa4cea9Sj_mayer #if (NB_MMU_MODES >= 3)
14926fa4cea9Sj_mayer         for(i = 0; i < CPU_TLB_SIZE; i++)
14936fa4cea9Sj_mayer             tlb_reset_dirty_range(&env->tlb_table[2][i], start1, length);
14946fa4cea9Sj_mayer #if (NB_MMU_MODES == 4)
14956fa4cea9Sj_mayer         for(i = 0; i < CPU_TLB_SIZE; i++)
14966fa4cea9Sj_mayer             tlb_reset_dirty_range(&env->tlb_table[3][i], start1, length);
14976fa4cea9Sj_mayer #endif
14986fa4cea9Sj_mayer #endif
14996a00d601Sbellard     }
150059817ccbSbellard 
150159817ccbSbellard #if !defined(CONFIG_SOFTMMU)
150259817ccbSbellard     /* XXX: this is expensive */
150359817ccbSbellard     {
150459817ccbSbellard         VirtPageDesc *p;
150559817ccbSbellard         int j;
150659817ccbSbellard         target_ulong addr;
150759817ccbSbellard 
150859817ccbSbellard         for(i = 0; i < L1_SIZE; i++) {
150959817ccbSbellard             p = l1_virt_map[i];
151059817ccbSbellard             if (p) {
151159817ccbSbellard                 addr = i << (TARGET_PAGE_BITS + L2_BITS);
151259817ccbSbellard                 for(j = 0; j < L2_SIZE; j++) {
151359817ccbSbellard                     if (p->valid_tag == virt_valid_tag &&
151459817ccbSbellard                         p->phys_addr >= start && p->phys_addr < end &&
151559817ccbSbellard                         (p->prot & PROT_WRITE)) {
151659817ccbSbellard                         if (addr < MMAP_AREA_END) {
151759817ccbSbellard                             mprotect((void *)addr, TARGET_PAGE_SIZE,
151859817ccbSbellard                                      p->prot & ~PROT_WRITE);
151959817ccbSbellard                         }
152059817ccbSbellard                     }
152159817ccbSbellard                     addr += TARGET_PAGE_SIZE;
152259817ccbSbellard                     p++;
152359817ccbSbellard                 }
152459817ccbSbellard             }
152559817ccbSbellard         }
152659817ccbSbellard     }
152759817ccbSbellard #endif
15281ccde1cbSbellard }
15291ccde1cbSbellard 
15303a7d929eSbellard static inline void tlb_update_dirty(CPUTLBEntry *tlb_entry)
15313a7d929eSbellard {
15323a7d929eSbellard     ram_addr_t ram_addr;
15333a7d929eSbellard 
153484b7b8e7Sbellard     if ((tlb_entry->addr_write & ~TARGET_PAGE_MASK) == IO_MEM_RAM) {
153584b7b8e7Sbellard         ram_addr = (tlb_entry->addr_write & TARGET_PAGE_MASK) +
15363a7d929eSbellard             tlb_entry->addend - (unsigned long)phys_ram_base;
15373a7d929eSbellard         if (!cpu_physical_memory_is_dirty(ram_addr)) {
153884b7b8e7Sbellard             tlb_entry->addr_write |= IO_MEM_NOTDIRTY;
15393a7d929eSbellard         }
15403a7d929eSbellard     }
15413a7d929eSbellard }
15423a7d929eSbellard 
15433a7d929eSbellard /* update the TLB according to the current state of the dirty bits */
15443a7d929eSbellard void cpu_tlb_update_dirty(CPUState *env)
15453a7d929eSbellard {
15463a7d929eSbellard     int i;
15473a7d929eSbellard     for(i = 0; i < CPU_TLB_SIZE; i++)
154884b7b8e7Sbellard         tlb_update_dirty(&env->tlb_table[0][i]);
15493a7d929eSbellard     for(i = 0; i < CPU_TLB_SIZE; i++)
155084b7b8e7Sbellard         tlb_update_dirty(&env->tlb_table[1][i]);
15516fa4cea9Sj_mayer #if (NB_MMU_MODES >= 3)
15526fa4cea9Sj_mayer     for(i = 0; i < CPU_TLB_SIZE; i++)
15536fa4cea9Sj_mayer         tlb_update_dirty(&env->tlb_table[2][i]);
15546fa4cea9Sj_mayer #if (NB_MMU_MODES == 4)
15556fa4cea9Sj_mayer     for(i = 0; i < CPU_TLB_SIZE; i++)
15566fa4cea9Sj_mayer         tlb_update_dirty(&env->tlb_table[3][i]);
15576fa4cea9Sj_mayer #endif
15586fa4cea9Sj_mayer #endif
15593a7d929eSbellard }
15603a7d929eSbellard 
15611ccde1cbSbellard static inline void tlb_set_dirty1(CPUTLBEntry *tlb_entry,
15621ccde1cbSbellard                                   unsigned long start)
15631ccde1cbSbellard {
15641ccde1cbSbellard     unsigned long addr;
156584b7b8e7Sbellard     if ((tlb_entry->addr_write & ~TARGET_PAGE_MASK) == IO_MEM_NOTDIRTY) {
156684b7b8e7Sbellard         addr = (tlb_entry->addr_write & TARGET_PAGE_MASK) + tlb_entry->addend;
15671ccde1cbSbellard         if (addr == start) {
156884b7b8e7Sbellard             tlb_entry->addr_write = (tlb_entry->addr_write & TARGET_PAGE_MASK) | IO_MEM_RAM;
15691ccde1cbSbellard         }
15701ccde1cbSbellard     }
15711ccde1cbSbellard }
15721ccde1cbSbellard 
15731ccde1cbSbellard /* update the TLB corresponding to virtual page vaddr and phys addr
15741ccde1cbSbellard    addr so that it is no longer dirty */
15756a00d601Sbellard static inline void tlb_set_dirty(CPUState *env,
15766a00d601Sbellard                                  unsigned long addr, target_ulong vaddr)
15771ccde1cbSbellard {
15781ccde1cbSbellard     int i;
15791ccde1cbSbellard 
15801ccde1cbSbellard     addr &= TARGET_PAGE_MASK;
15811ccde1cbSbellard     i = (vaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
158284b7b8e7Sbellard     tlb_set_dirty1(&env->tlb_table[0][i], addr);
158384b7b8e7Sbellard     tlb_set_dirty1(&env->tlb_table[1][i], addr);
15846fa4cea9Sj_mayer #if (NB_MMU_MODES >= 3)
15856fa4cea9Sj_mayer     tlb_set_dirty1(&env->tlb_table[2][i], addr);
15866fa4cea9Sj_mayer #if (NB_MMU_MODES == 4)
15876fa4cea9Sj_mayer     tlb_set_dirty1(&env->tlb_table[3][i], addr);
15886fa4cea9Sj_mayer #endif
15896fa4cea9Sj_mayer #endif
15901ccde1cbSbellard }
15911ccde1cbSbellard 
159259817ccbSbellard /* add a new TLB entry. At most one entry for a given virtual address
159359817ccbSbellard    is permitted. Return 0 if OK or 2 if the page could not be mapped
159459817ccbSbellard    (can only happen in non SOFTMMU mode for I/O pages or pages
159559817ccbSbellard    conflicting with the host address space). */
159684b7b8e7Sbellard int tlb_set_page_exec(CPUState *env, target_ulong vaddr,
15972e12669aSbellard                       target_phys_addr_t paddr, int prot,
15989fa3e853Sbellard                       int is_user, int is_softmmu)
15999fa3e853Sbellard {
160092e873b9Sbellard     PhysPageDesc *p;
16014f2ac237Sbellard     unsigned long pd;
16029fa3e853Sbellard     unsigned int index;
16034f2ac237Sbellard     target_ulong address;
1604108c49b8Sbellard     target_phys_addr_t addend;
16059fa3e853Sbellard     int ret;
160684b7b8e7Sbellard     CPUTLBEntry *te;
16076658ffb8Spbrook     int i;
16089fa3e853Sbellard 
160992e873b9Sbellard     p = phys_page_find(paddr >> TARGET_PAGE_BITS);
16109fa3e853Sbellard     if (!p) {
16119fa3e853Sbellard         pd = IO_MEM_UNASSIGNED;
16129fa3e853Sbellard     } else {
16139fa3e853Sbellard         pd = p->phys_offset;
16149fa3e853Sbellard     }
16159fa3e853Sbellard #if defined(DEBUG_TLB)
16163a7d929eSbellard     printf("tlb_set_page: vaddr=" TARGET_FMT_lx " paddr=0x%08x prot=%x u=%d smmu=%d pd=0x%08lx\n",
161784b7b8e7Sbellard            vaddr, (int)paddr, prot, is_user, is_softmmu, pd);
16189fa3e853Sbellard #endif
16199fa3e853Sbellard 
16209fa3e853Sbellard     ret = 0;
16219fa3e853Sbellard #if !defined(CONFIG_SOFTMMU)
16229fa3e853Sbellard     if (is_softmmu)
16239fa3e853Sbellard #endif
16249fa3e853Sbellard     {
16252a4188a3Sbellard         if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM && !(pd & IO_MEM_ROMD)) {
16269fa3e853Sbellard             /* IO memory case */
16279fa3e853Sbellard             address = vaddr | pd;
16289fa3e853Sbellard             addend = paddr;
16299fa3e853Sbellard         } else {
16309fa3e853Sbellard             /* standard memory */
16319fa3e853Sbellard             address = vaddr;
16329fa3e853Sbellard             addend = (unsigned long)phys_ram_base + (pd & TARGET_PAGE_MASK);
16339fa3e853Sbellard         }
16349fa3e853Sbellard 
16356658ffb8Spbrook         /* Make accesses to pages with watchpoints go via the
16366658ffb8Spbrook            watchpoint trap routines.  */
16376658ffb8Spbrook         for (i = 0; i < env->nb_watchpoints; i++) {
16386658ffb8Spbrook             if (vaddr == (env->watchpoint[i].vaddr & TARGET_PAGE_MASK)) {
16396658ffb8Spbrook                 if (address & ~TARGET_PAGE_MASK) {
1640d79acba4Sbalrog                     env->watchpoint[i].addend = 0;
16416658ffb8Spbrook                     address = vaddr | io_mem_watch;
16426658ffb8Spbrook                 } else {
1643d79acba4Sbalrog                     env->watchpoint[i].addend = pd - paddr +
1644d79acba4Sbalrog                         (unsigned long) phys_ram_base;
16456658ffb8Spbrook                     /* TODO: Figure out how to make read watchpoints coexist
16466658ffb8Spbrook                        with code.  */
16476658ffb8Spbrook                     pd = (pd & TARGET_PAGE_MASK) | io_mem_watch | IO_MEM_ROMD;
16486658ffb8Spbrook                 }
16496658ffb8Spbrook             }
16506658ffb8Spbrook         }
16516658ffb8Spbrook 
165290f18422Sbellard         index = (vaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
16539fa3e853Sbellard         addend -= vaddr;
165484b7b8e7Sbellard         te = &env->tlb_table[is_user][index];
165584b7b8e7Sbellard         te->addend = addend;
165667b915a5Sbellard         if (prot & PAGE_READ) {
165784b7b8e7Sbellard             te->addr_read = address;
16589fa3e853Sbellard         } else {
165984b7b8e7Sbellard             te->addr_read = -1;
166084b7b8e7Sbellard         }
166184b7b8e7Sbellard         if (prot & PAGE_EXEC) {
166284b7b8e7Sbellard             te->addr_code = address;
166384b7b8e7Sbellard         } else {
166484b7b8e7Sbellard             te->addr_code = -1;
16659fa3e853Sbellard         }
166667b915a5Sbellard         if (prot & PAGE_WRITE) {
1667856074ecSbellard             if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_ROM ||
1668856074ecSbellard                 (pd & IO_MEM_ROMD)) {
1669856074ecSbellard                 /* write access calls the I/O callback */
1670856074ecSbellard                 te->addr_write = vaddr |
1671856074ecSbellard                     (pd & ~(TARGET_PAGE_MASK | IO_MEM_ROMD));
16723a7d929eSbellard             } else if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_RAM &&
16731ccde1cbSbellard                        !cpu_physical_memory_is_dirty(pd)) {
167484b7b8e7Sbellard                 te->addr_write = vaddr | IO_MEM_NOTDIRTY;
16759fa3e853Sbellard             } else {
167684b7b8e7Sbellard                 te->addr_write = address;
16779fa3e853Sbellard             }
16789fa3e853Sbellard         } else {
167984b7b8e7Sbellard             te->addr_write = -1;
16809fa3e853Sbellard         }
16819fa3e853Sbellard     }
16829fa3e853Sbellard #if !defined(CONFIG_SOFTMMU)
16839fa3e853Sbellard     else {
16849fa3e853Sbellard         if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM) {
16859fa3e853Sbellard             /* IO access: no mapping is done as it will be handled by the
16869fa3e853Sbellard                soft MMU */
16879fa3e853Sbellard             if (!(env->hflags & HF_SOFTMMU_MASK))
16889fa3e853Sbellard                 ret = 2;
16899fa3e853Sbellard         } else {
16909fa3e853Sbellard             void *map_addr;
169159817ccbSbellard 
169259817ccbSbellard             if (vaddr >= MMAP_AREA_END) {
169359817ccbSbellard                 ret = 2;
169459817ccbSbellard             } else {
16959fa3e853Sbellard                 if (prot & PROT_WRITE) {
169659817ccbSbellard                     if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_ROM ||
1697d720b93dSbellard #if defined(TARGET_HAS_SMC) || 1
169859817ccbSbellard                         first_tb ||
1699d720b93dSbellard #endif
170059817ccbSbellard                         ((pd & ~TARGET_PAGE_MASK) == IO_MEM_RAM &&
170159817ccbSbellard                          !cpu_physical_memory_is_dirty(pd))) {
17029fa3e853Sbellard                         /* ROM: we do as if code was inside */
17039fa3e853Sbellard                         /* if code is present, we only map as read only and save the
17049fa3e853Sbellard                            original mapping */
17059fa3e853Sbellard                         VirtPageDesc *vp;
17069fa3e853Sbellard 
170790f18422Sbellard                         vp = virt_page_find_alloc(vaddr >> TARGET_PAGE_BITS, 1);
17089fa3e853Sbellard                         vp->phys_addr = pd;
17099fa3e853Sbellard                         vp->prot = prot;
17109fa3e853Sbellard                         vp->valid_tag = virt_valid_tag;
17119fa3e853Sbellard                         prot &= ~PAGE_WRITE;
17129fa3e853Sbellard                     }
17139fa3e853Sbellard                 }
17149fa3e853Sbellard                 map_addr = mmap((void *)vaddr, TARGET_PAGE_SIZE, prot,
17159fa3e853Sbellard                                 MAP_SHARED | MAP_FIXED, phys_ram_fd, (pd & TARGET_PAGE_MASK));
17169fa3e853Sbellard                 if (map_addr == MAP_FAILED) {
17179fa3e853Sbellard                     cpu_abort(env, "mmap failed when mapped physical address 0x%08x to virtual address 0x%08x\n",
17189fa3e853Sbellard                               paddr, vaddr);
17199fa3e853Sbellard                 }
17209fa3e853Sbellard             }
17219fa3e853Sbellard         }
172259817ccbSbellard     }
17239fa3e853Sbellard #endif
17249fa3e853Sbellard     return ret;
17259fa3e853Sbellard }
17269fa3e853Sbellard 
17279fa3e853Sbellard /* called from signal handler: invalidate the code and unprotect the
17289fa3e853Sbellard    page. Return TRUE if the fault was succesfully handled. */
172953a5960aSpbrook int page_unprotect(target_ulong addr, unsigned long pc, void *puc)
17309fa3e853Sbellard {
17319fa3e853Sbellard #if !defined(CONFIG_SOFTMMU)
17329fa3e853Sbellard     VirtPageDesc *vp;
17339fa3e853Sbellard 
17349fa3e853Sbellard #if defined(DEBUG_TLB)
17359fa3e853Sbellard     printf("page_unprotect: addr=0x%08x\n", addr);
17369fa3e853Sbellard #endif
17379fa3e853Sbellard     addr &= TARGET_PAGE_MASK;
173859817ccbSbellard 
173959817ccbSbellard     /* if it is not mapped, no need to worry here */
174059817ccbSbellard     if (addr >= MMAP_AREA_END)
174159817ccbSbellard         return 0;
17429fa3e853Sbellard     vp = virt_page_find(addr >> TARGET_PAGE_BITS);
17439fa3e853Sbellard     if (!vp)
17449fa3e853Sbellard         return 0;
17459fa3e853Sbellard     /* NOTE: in this case, validate_tag is _not_ tested as it
17469fa3e853Sbellard        validates only the code TLB */
17479fa3e853Sbellard     if (vp->valid_tag != virt_valid_tag)
17489fa3e853Sbellard         return 0;
17499fa3e853Sbellard     if (!(vp->prot & PAGE_WRITE))
17509fa3e853Sbellard         return 0;
17519fa3e853Sbellard #if defined(DEBUG_TLB)
17529fa3e853Sbellard     printf("page_unprotect: addr=0x%08x phys_addr=0x%08x prot=%x\n",
17539fa3e853Sbellard            addr, vp->phys_addr, vp->prot);
17549fa3e853Sbellard #endif
175559817ccbSbellard     if (mprotect((void *)addr, TARGET_PAGE_SIZE, vp->prot) < 0)
175659817ccbSbellard         cpu_abort(cpu_single_env, "error mprotect addr=0x%lx prot=%d\n",
175759817ccbSbellard                   (unsigned long)addr, vp->prot);
1758d720b93dSbellard     /* set the dirty bit */
17590a962c02Sbellard     phys_ram_dirty[vp->phys_addr >> TARGET_PAGE_BITS] = 0xff;
1760d720b93dSbellard     /* flush the code inside */
1761d720b93dSbellard     tb_invalidate_phys_page(vp->phys_addr, pc, puc);
17629fa3e853Sbellard     return 1;
17639fa3e853Sbellard #else
17649fa3e853Sbellard     return 0;
17659fa3e853Sbellard #endif
176633417e70Sbellard }
176733417e70Sbellard 
17680124311eSbellard #else
17690124311eSbellard 
1770ee8b7021Sbellard void tlb_flush(CPUState *env, int flush_global)
17710124311eSbellard {
17720124311eSbellard }
17730124311eSbellard 
17742e12669aSbellard void tlb_flush_page(CPUState *env, target_ulong addr)
17750124311eSbellard {
17760124311eSbellard }
17770124311eSbellard 
177884b7b8e7Sbellard int tlb_set_page_exec(CPUState *env, target_ulong vaddr,
17792e12669aSbellard                       target_phys_addr_t paddr, int prot,
17809fa3e853Sbellard                       int is_user, int is_softmmu)
178133417e70Sbellard {
17829fa3e853Sbellard     return 0;
178333417e70Sbellard }
178433417e70Sbellard 
17859fa3e853Sbellard /* dump memory mappings */
17869fa3e853Sbellard void page_dump(FILE *f)
178733417e70Sbellard {
17889fa3e853Sbellard     unsigned long start, end;
17899fa3e853Sbellard     int i, j, prot, prot1;
17909fa3e853Sbellard     PageDesc *p;
17919fa3e853Sbellard 
17929fa3e853Sbellard     fprintf(f, "%-8s %-8s %-8s %s\n",
17939fa3e853Sbellard             "start", "end", "size", "prot");
17949fa3e853Sbellard     start = -1;
17959fa3e853Sbellard     end = -1;
17969fa3e853Sbellard     prot = 0;
17979fa3e853Sbellard     for(i = 0; i <= L1_SIZE; i++) {
17989fa3e853Sbellard         if (i < L1_SIZE)
17999fa3e853Sbellard             p = l1_map[i];
18009fa3e853Sbellard         else
18019fa3e853Sbellard             p = NULL;
18029fa3e853Sbellard         for(j = 0;j < L2_SIZE; j++) {
180333417e70Sbellard             if (!p)
18049fa3e853Sbellard                 prot1 = 0;
18059fa3e853Sbellard             else
18069fa3e853Sbellard                 prot1 = p[j].flags;
18079fa3e853Sbellard             if (prot1 != prot) {
18089fa3e853Sbellard                 end = (i << (32 - L1_BITS)) | (j << TARGET_PAGE_BITS);
18099fa3e853Sbellard                 if (start != -1) {
18109fa3e853Sbellard                     fprintf(f, "%08lx-%08lx %08lx %c%c%c\n",
18119fa3e853Sbellard                             start, end, end - start,
18129fa3e853Sbellard                             prot & PAGE_READ ? 'r' : '-',
18139fa3e853Sbellard                             prot & PAGE_WRITE ? 'w' : '-',
18149fa3e853Sbellard                             prot & PAGE_EXEC ? 'x' : '-');
181533417e70Sbellard                 }
18169fa3e853Sbellard                 if (prot1 != 0)
18179fa3e853Sbellard                     start = end;
18189fa3e853Sbellard                 else
18199fa3e853Sbellard                     start = -1;
18209fa3e853Sbellard                 prot = prot1;
18219fa3e853Sbellard             }
18229fa3e853Sbellard             if (!p)
18239fa3e853Sbellard                 break;
18249fa3e853Sbellard         }
18259fa3e853Sbellard     }
18269fa3e853Sbellard }
18279fa3e853Sbellard 
182853a5960aSpbrook int page_get_flags(target_ulong address)
18299fa3e853Sbellard {
18309fa3e853Sbellard     PageDesc *p;
18319fa3e853Sbellard 
18329fa3e853Sbellard     p = page_find(address >> TARGET_PAGE_BITS);
18339fa3e853Sbellard     if (!p)
18349fa3e853Sbellard         return 0;
18359fa3e853Sbellard     return p->flags;
18369fa3e853Sbellard }
18379fa3e853Sbellard 
18389fa3e853Sbellard /* modify the flags of a page and invalidate the code if
18399fa3e853Sbellard    necessary. The flag PAGE_WRITE_ORG is positionned automatically
18409fa3e853Sbellard    depending on PAGE_WRITE */
184153a5960aSpbrook void page_set_flags(target_ulong start, target_ulong end, int flags)
18429fa3e853Sbellard {
18439fa3e853Sbellard     PageDesc *p;
184453a5960aSpbrook     target_ulong addr;
18459fa3e853Sbellard 
18469fa3e853Sbellard     start = start & TARGET_PAGE_MASK;
18479fa3e853Sbellard     end = TARGET_PAGE_ALIGN(end);
18489fa3e853Sbellard     if (flags & PAGE_WRITE)
18499fa3e853Sbellard         flags |= PAGE_WRITE_ORG;
18509fa3e853Sbellard     spin_lock(&tb_lock);
18519fa3e853Sbellard     for(addr = start; addr < end; addr += TARGET_PAGE_SIZE) {
18529fa3e853Sbellard         p = page_find_alloc(addr >> TARGET_PAGE_BITS);
18539fa3e853Sbellard         /* if the write protection is set, then we invalidate the code
18549fa3e853Sbellard            inside */
18559fa3e853Sbellard         if (!(p->flags & PAGE_WRITE) &&
18569fa3e853Sbellard             (flags & PAGE_WRITE) &&
18579fa3e853Sbellard             p->first_tb) {
1858d720b93dSbellard             tb_invalidate_phys_page(addr, 0, NULL);
18599fa3e853Sbellard         }
18609fa3e853Sbellard         p->flags = flags;
18619fa3e853Sbellard     }
18629fa3e853Sbellard     spin_unlock(&tb_lock);
18639fa3e853Sbellard }
18649fa3e853Sbellard 
18659fa3e853Sbellard /* called from signal handler: invalidate the code and unprotect the
18669fa3e853Sbellard    page. Return TRUE if the fault was succesfully handled. */
186753a5960aSpbrook int page_unprotect(target_ulong address, unsigned long pc, void *puc)
18689fa3e853Sbellard {
18699fa3e853Sbellard     unsigned int page_index, prot, pindex;
18709fa3e853Sbellard     PageDesc *p, *p1;
187153a5960aSpbrook     target_ulong host_start, host_end, addr;
18729fa3e853Sbellard 
187383fb7adfSbellard     host_start = address & qemu_host_page_mask;
18749fa3e853Sbellard     page_index = host_start >> TARGET_PAGE_BITS;
18759fa3e853Sbellard     p1 = page_find(page_index);
18769fa3e853Sbellard     if (!p1)
18779fa3e853Sbellard         return 0;
187883fb7adfSbellard     host_end = host_start + qemu_host_page_size;
18799fa3e853Sbellard     p = p1;
18809fa3e853Sbellard     prot = 0;
18819fa3e853Sbellard     for(addr = host_start;addr < host_end; addr += TARGET_PAGE_SIZE) {
18829fa3e853Sbellard         prot |= p->flags;
18839fa3e853Sbellard         p++;
18849fa3e853Sbellard     }
18859fa3e853Sbellard     /* if the page was really writable, then we change its
18869fa3e853Sbellard        protection back to writable */
18879fa3e853Sbellard     if (prot & PAGE_WRITE_ORG) {
18889fa3e853Sbellard         pindex = (address - host_start) >> TARGET_PAGE_BITS;
18899fa3e853Sbellard         if (!(p1[pindex].flags & PAGE_WRITE)) {
189053a5960aSpbrook             mprotect((void *)g2h(host_start), qemu_host_page_size,
18919fa3e853Sbellard                      (prot & PAGE_BITS) | PAGE_WRITE);
18929fa3e853Sbellard             p1[pindex].flags |= PAGE_WRITE;
18939fa3e853Sbellard             /* and since the content will be modified, we must invalidate
18949fa3e853Sbellard                the corresponding translated code. */
1895d720b93dSbellard             tb_invalidate_phys_page(address, pc, puc);
18969fa3e853Sbellard #ifdef DEBUG_TB_CHECK
18979fa3e853Sbellard             tb_invalidate_check(address);
18989fa3e853Sbellard #endif
18999fa3e853Sbellard             return 1;
19009fa3e853Sbellard         }
19019fa3e853Sbellard     }
19029fa3e853Sbellard     return 0;
19039fa3e853Sbellard }
19049fa3e853Sbellard 
19059fa3e853Sbellard /* call this function when system calls directly modify a memory area */
190653a5960aSpbrook /* ??? This should be redundant now we have lock_user.  */
190753a5960aSpbrook void page_unprotect_range(target_ulong data, target_ulong data_size)
19089fa3e853Sbellard {
190953a5960aSpbrook     target_ulong start, end, addr;
19109fa3e853Sbellard 
191153a5960aSpbrook     start = data;
19129fa3e853Sbellard     end = start + data_size;
19139fa3e853Sbellard     start &= TARGET_PAGE_MASK;
19149fa3e853Sbellard     end = TARGET_PAGE_ALIGN(end);
19159fa3e853Sbellard     for(addr = start; addr < end; addr += TARGET_PAGE_SIZE) {
1916d720b93dSbellard         page_unprotect(addr, 0, NULL);
19179fa3e853Sbellard     }
19189fa3e853Sbellard }
19199fa3e853Sbellard 
19206a00d601Sbellard static inline void tlb_set_dirty(CPUState *env,
19216a00d601Sbellard                                  unsigned long addr, target_ulong vaddr)
19221ccde1cbSbellard {
19231ccde1cbSbellard }
19249fa3e853Sbellard #endif /* defined(CONFIG_USER_ONLY) */
192533417e70Sbellard 
1926db7b5426Sblueswir1 static int subpage_register (subpage_t *mmio, uint32_t start, uint32_t end,
1927db7b5426Sblueswir1                              int memory);
1928db7b5426Sblueswir1 static void *subpage_init (target_phys_addr_t base, uint32_t *phys,
1929db7b5426Sblueswir1                            int orig_memory);
/* Compute the sub-page byte range [start_addr2, end_addr2] that the
   registered region occupies inside the target page at 'addr', and set
   'need_subpage' when the region does not cover the whole page.
   NOTE(review): this macro also reads 'orig_size' from the caller's
   scope even though it is not a named argument — callers must define it. */
#define CHECK_SUBPAGE(addr, start_addr, start_addr2, end_addr, end_addr2, \
                      need_subpage)                                     \
    do {                                                                \
        if (addr > start_addr)                                          \
            start_addr2 = 0;                                            \
        else {                                                          \
            start_addr2 = start_addr & ~TARGET_PAGE_MASK;               \
            if (start_addr2 > 0)                                        \
                need_subpage = 1;                                       \
        }                                                               \
                                                                        \
        if ((start_addr + orig_size) - addr >= TARGET_PAGE_SIZE)        \
            end_addr2 = TARGET_PAGE_SIZE - 1;                           \
        else {                                                          \
            end_addr2 = (start_addr + orig_size - 1) & ~TARGET_PAGE_MASK; \
            if (end_addr2 < TARGET_PAGE_SIZE - 1)                       \
                need_subpage = 1;                                       \
        }                                                               \
    } while (0)
1949db7b5426Sblueswir1 
/* register physical memory. 'size' must be a multiple of the target
   page size. If (phys_offset & ~TARGET_PAGE_MASK) != 0, then it is an
   io memory page */
void cpu_register_physical_memory(target_phys_addr_t start_addr,
                                  unsigned long size,
                                  unsigned long phys_offset)
{
    target_phys_addr_t addr, end_addr;
    PhysPageDesc *p;
    CPUState *env;
    unsigned long orig_size = size; /* read implicitly by CHECK_SUBPAGE */
    void *subpage;

    /* round size up to a whole number of target pages */
    size = (size + TARGET_PAGE_SIZE - 1) & TARGET_PAGE_MASK;
    end_addr = start_addr + (target_phys_addr_t)size;
    for(addr = start_addr; addr != end_addr; addr += TARGET_PAGE_SIZE) {
        p = phys_page_find(addr >> TARGET_PAGE_BITS);
        if (p && p->phys_offset != IO_MEM_UNASSIGNED) {
            /* page already has a mapping: if the new region covers only
               part of it, merge both into a subpage container */
            unsigned long orig_memory = p->phys_offset;
            target_phys_addr_t start_addr2, end_addr2;
            int need_subpage = 0;

            CHECK_SUBPAGE(addr, start_addr, start_addr2, end_addr, end_addr2,
                          need_subpage);
            if (need_subpage) {
                if (!(orig_memory & IO_MEM_SUBPAGE)) {
                    subpage = subpage_init((addr & TARGET_PAGE_MASK),
                                           &p->phys_offset, orig_memory);
                } else {
                    /* already a subpage container: reuse its opaque state */
                    subpage = io_mem_opaque[(orig_memory & ~TARGET_PAGE_MASK)
                                            >> IO_MEM_SHIFT];
                }
                subpage_register(subpage, start_addr2, end_addr2, phys_offset);
            } else {
                p->phys_offset = phys_offset;
                /* RAM/ROM(D) mappings: advance the backing offset one
                   page per iteration so each page points at its data */
                if ((phys_offset & ~TARGET_PAGE_MASK) <= IO_MEM_ROM ||
                    (phys_offset & IO_MEM_ROMD))
                    phys_offset += TARGET_PAGE_SIZE;
            }
        } else {
            /* previously unassigned page: allocate its descriptor */
            p = phys_page_find_alloc(addr >> TARGET_PAGE_BITS, 1);
            p->phys_offset = phys_offset;
            if ((phys_offset & ~TARGET_PAGE_MASK) <= IO_MEM_ROM ||
                (phys_offset & IO_MEM_ROMD))
                phys_offset += TARGET_PAGE_SIZE;
            else {
                target_phys_addr_t start_addr2, end_addr2;
                int need_subpage = 0;

                CHECK_SUBPAGE(addr, start_addr, start_addr2, end_addr,
                              end_addr2, need_subpage);

                if (need_subpage) {
                    subpage = subpage_init((addr & TARGET_PAGE_MASK),
                                           &p->phys_offset, IO_MEM_UNASSIGNED);
                    subpage_register(subpage, start_addr2, end_addr2,
                                     phys_offset);
                }
            }
        }
    }

    /* since each CPU stores ram addresses in its TLB cache, we must
       reset the modified entries */
    /* XXX: slow ! */
    for(env = first_cpu; env != NULL; env = env->next_cpu) {
        tlb_flush(env, 1);
    }
}
201933417e70Sbellard 
2020ba863458Sbellard /* XXX: temporary until new memory mapping API */
2021ba863458Sbellard uint32_t cpu_get_physical_page_desc(target_phys_addr_t addr)
2022ba863458Sbellard {
2023ba863458Sbellard     PhysPageDesc *p;
2024ba863458Sbellard 
2025ba863458Sbellard     p = phys_page_find(addr >> TARGET_PAGE_BITS);
2026ba863458Sbellard     if (!p)
2027ba863458Sbellard         return IO_MEM_UNASSIGNED;
2028ba863458Sbellard     return p->phys_offset;
2029ba863458Sbellard }
2030ba863458Sbellard 
2031e9a1ab19Sbellard /* XXX: better than nothing */
2032e9a1ab19Sbellard ram_addr_t qemu_ram_alloc(unsigned int size)
2033e9a1ab19Sbellard {
2034e9a1ab19Sbellard     ram_addr_t addr;
2035e9a1ab19Sbellard     if ((phys_ram_alloc_offset + size) >= phys_ram_size) {
2036e9a1ab19Sbellard         fprintf(stderr, "Not enough memory (requested_size = %u, max memory = %d)\n",
2037e9a1ab19Sbellard                 size, phys_ram_size);
2038e9a1ab19Sbellard         abort();
2039e9a1ab19Sbellard     }
2040e9a1ab19Sbellard     addr = phys_ram_alloc_offset;
2041e9a1ab19Sbellard     phys_ram_alloc_offset = TARGET_PAGE_ALIGN(phys_ram_alloc_offset + size);
2042e9a1ab19Sbellard     return addr;
2043e9a1ab19Sbellard }
2044e9a1ab19Sbellard 
/* Intentionally a no-op: the bump allocator above never reclaims RAM. */
void qemu_ram_free(ram_addr_t addr)
{
}
2048e9a1ab19Sbellard 
/* Read handler for unassigned memory: optionally logs, lets SPARC raise
   its architectural fault, and otherwise returns 0.  Shared by all sizes. */
static uint32_t unassigned_mem_readb(void *opaque, target_phys_addr_t addr)
{
#ifdef DEBUG_UNASSIGNED
    printf("Unassigned mem read " TARGET_FMT_lx "\n", addr);
#endif
#ifdef TARGET_SPARC
    /* is_write = 0 */
    do_unassigned_access(addr, 0, 0, 0);
#endif
    return 0;
}
205933417e70Sbellard 
/* Write handler for unassigned memory: the value is discarded.  SPARC
   additionally raises its unassigned-access fault. */
static void unassigned_mem_writeb(void *opaque, target_phys_addr_t addr, uint32_t val)
{
#ifdef DEBUG_UNASSIGNED
    printf("Unassigned mem write " TARGET_FMT_lx " = 0x%x\n", addr, val);
#endif
#ifdef TARGET_SPARC
    /* is_write = 1 */
    do_unassigned_access(addr, 1, 0, 0);
#endif
}
206933417e70Sbellard 
/* Dispatch tables indexed by access size (0=byte, 1=word, 2=long).
   The byte handlers serve all sizes: reads always return 0 and writes
   are always discarded, so no size-specific logic is needed. */
static CPUReadMemoryFunc *unassigned_mem_read[3] = {
    unassigned_mem_readb,
    unassigned_mem_readb,
    unassigned_mem_readb,
};

static CPUWriteMemoryFunc *unassigned_mem_write[3] = {
    unassigned_mem_writeb,
    unassigned_mem_writeb,
    unassigned_mem_writeb,
};
208133417e70Sbellard 
/* Byte store into a RAM page that may hold translated code.  'addr' is a
   host address here (it is rebased against phys_ram_base below).
   Invalidates affected TBs, performs the store, then updates the dirty
   bitmap. */
static void notdirty_mem_writeb(void *opaque, target_phys_addr_t addr, uint32_t val)
{
    unsigned long ram_addr;
    int dirty_flags;
    ram_addr = addr - (unsigned long)phys_ram_base;
    dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
    if (!(dirty_flags & CODE_DIRTY_FLAG)) {
        /* page may contain translated code: flush the TBs covering
           this 1-byte write before modifying memory */
#if !defined(CONFIG_USER_ONLY)
        tb_invalidate_phys_page_fast(ram_addr, 1);
        dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
#endif
    }
    stb_p((uint8_t *)(long)addr, val);
#ifdef USE_KQEMU
    if (cpu_single_env->kqemu_enabled &&
        (dirty_flags & KQEMU_MODIFY_PAGE_MASK) != KQEMU_MODIFY_PAGE_MASK)
        kqemu_modify_page(cpu_single_env, ram_addr);
#endif
    /* mark the page dirty for every client except code translation */
    dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
    phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] = dirty_flags;
    /* we remove the notdirty callback only if the code has been
       flushed */
    if (dirty_flags == 0xff)
        tlb_set_dirty(cpu_single_env, addr, cpu_single_env->mem_write_vaddr);
}
21071ccde1cbSbellard 
/* 16-bit variant of notdirty_mem_writeb: same TB-invalidate / store /
   dirty-bitmap sequence for a 2-byte access. */
static void notdirty_mem_writew(void *opaque, target_phys_addr_t addr, uint32_t val)
{
    unsigned long ram_addr;
    int dirty_flags;
    ram_addr = addr - (unsigned long)phys_ram_base;
    dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
    if (!(dirty_flags & CODE_DIRTY_FLAG)) {
        /* flush TBs covering this 2-byte write before modifying memory */
#if !defined(CONFIG_USER_ONLY)
        tb_invalidate_phys_page_fast(ram_addr, 2);
        dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
#endif
    }
    stw_p((uint8_t *)(long)addr, val);
#ifdef USE_KQEMU
    if (cpu_single_env->kqemu_enabled &&
        (dirty_flags & KQEMU_MODIFY_PAGE_MASK) != KQEMU_MODIFY_PAGE_MASK)
        kqemu_modify_page(cpu_single_env, ram_addr);
#endif
    /* mark the page dirty for every client except code translation */
    dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
    phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] = dirty_flags;
    /* we remove the notdirty callback only if the code has been
       flushed */
    if (dirty_flags == 0xff)
        tlb_set_dirty(cpu_single_env, addr, cpu_single_env->mem_write_vaddr);
}
21331ccde1cbSbellard 
/* 32-bit variant of notdirty_mem_writeb: same TB-invalidate / store /
   dirty-bitmap sequence for a 4-byte access. */
static void notdirty_mem_writel(void *opaque, target_phys_addr_t addr, uint32_t val)
{
    unsigned long ram_addr;
    int dirty_flags;
    ram_addr = addr - (unsigned long)phys_ram_base;
    dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
    if (!(dirty_flags & CODE_DIRTY_FLAG)) {
        /* flush TBs covering this 4-byte write before modifying memory */
#if !defined(CONFIG_USER_ONLY)
        tb_invalidate_phys_page_fast(ram_addr, 4);
        dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
#endif
    }
    stl_p((uint8_t *)(long)addr, val);
#ifdef USE_KQEMU
    if (cpu_single_env->kqemu_enabled &&
        (dirty_flags & KQEMU_MODIFY_PAGE_MASK) != KQEMU_MODIFY_PAGE_MASK)
        kqemu_modify_page(cpu_single_env, ram_addr);
#endif
    /* mark the page dirty for every client except code translation */
    dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
    phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] = dirty_flags;
    /* we remove the notdirty callback only if the code has been
       flushed */
    if (dirty_flags == 0xff)
        tlb_set_dirty(cpu_single_env, addr, cpu_single_env->mem_write_vaddr);
}
21591ccde1cbSbellard 
/* Placeholder read table for slots (ROM, notdirty) whose reads are
   served by the fast path and never reach an io handler. */
static CPUReadMemoryFunc *error_mem_read[3] = {
    NULL, /* never used */
    NULL, /* never used */
    NULL, /* never used */
};

/* Size-indexed write handlers for RAM pages that may contain
   translated code (see notdirty_mem_write* above). */
static CPUWriteMemoryFunc *notdirty_mem_write[3] = {
    notdirty_mem_writeb,
    notdirty_mem_writew,
    notdirty_mem_writel,
};
21711ccde1cbSbellard 
21726658ffb8Spbrook #if defined(CONFIG_SOFTMMU)
21736658ffb8Spbrook /* Watchpoint access routines.  Watchpoints are inserted using TLB tricks,
21746658ffb8Spbrook    so these check for a hit then pass through to the normal out-of-line
21756658ffb8Spbrook    phys routines.  */
21766658ffb8Spbrook static uint32_t watch_mem_readb(void *opaque, target_phys_addr_t addr)
21776658ffb8Spbrook {
21786658ffb8Spbrook     return ldub_phys(addr);
21796658ffb8Spbrook }
21806658ffb8Spbrook 
21816658ffb8Spbrook static uint32_t watch_mem_readw(void *opaque, target_phys_addr_t addr)
21826658ffb8Spbrook {
21836658ffb8Spbrook     return lduw_phys(addr);
21846658ffb8Spbrook }
21856658ffb8Spbrook 
21866658ffb8Spbrook static uint32_t watch_mem_readl(void *opaque, target_phys_addr_t addr)
21876658ffb8Spbrook {
21886658ffb8Spbrook     return ldl_phys(addr);
21896658ffb8Spbrook }
21906658ffb8Spbrook 
/* Generate a debug exception if a watchpoint has been hit.
   Returns the real physical address of the access.  addr will be a host
   address in case of a RAM location.  */
static target_ulong check_watchpoint(target_phys_addr_t addr)
{
    CPUState *env = cpu_single_env;
    target_ulong watch;
    target_ulong retaddr;
    int i;

    retaddr = addr;
    for (i = 0; i < env->nb_watchpoints; i++) {
        watch = env->watchpoint[i].vaddr;
        /* first match on the page of the faulting virtual address */
        if (((env->mem_write_vaddr ^ watch) & TARGET_PAGE_MASK) == 0) {
            /* undo the watchpoint TLB redirection (addend) to recover
               the real address of the access */
            retaddr = addr - env->watchpoint[i].addend;
            /* then check the exact offset within the page */
            if (((addr ^ watch) & ~TARGET_PAGE_MASK) == 0) {
                /* watchpoint_hit is 1-based; 0 means "no hit" */
                cpu_single_env->watchpoint_hit = i + 1;
                cpu_interrupt(cpu_single_env, CPU_INTERRUPT_DEBUG);
                break;
            }
        }
    }
    return retaddr;
}
22156658ffb8Spbrook 
22166658ffb8Spbrook static void watch_mem_writeb(void *opaque, target_phys_addr_t addr,
22176658ffb8Spbrook                              uint32_t val)
22186658ffb8Spbrook {
22196658ffb8Spbrook     addr = check_watchpoint(addr);
22206658ffb8Spbrook     stb_phys(addr, val);
22216658ffb8Spbrook }
22226658ffb8Spbrook 
22236658ffb8Spbrook static void watch_mem_writew(void *opaque, target_phys_addr_t addr,
22246658ffb8Spbrook                              uint32_t val)
22256658ffb8Spbrook {
22266658ffb8Spbrook     addr = check_watchpoint(addr);
22276658ffb8Spbrook     stw_phys(addr, val);
22286658ffb8Spbrook }
22296658ffb8Spbrook 
22306658ffb8Spbrook static void watch_mem_writel(void *opaque, target_phys_addr_t addr,
22316658ffb8Spbrook                              uint32_t val)
22326658ffb8Spbrook {
22336658ffb8Spbrook     addr = check_watchpoint(addr);
22346658ffb8Spbrook     stl_phys(addr, val);
22356658ffb8Spbrook }
22366658ffb8Spbrook 
/* Dispatch tables for the watchpoint io slot, indexed by access size. */
static CPUReadMemoryFunc *watch_mem_read[3] = {
    watch_mem_readb,
    watch_mem_readw,
    watch_mem_readl,
};

static CPUWriteMemoryFunc *watch_mem_write[3] = {
    watch_mem_writeb,
    watch_mem_writew,
    watch_mem_writel,
};
22486658ffb8Spbrook #endif
22496658ffb8Spbrook 
2250db7b5426Sblueswir1 static inline uint32_t subpage_readlen (subpage_t *mmio, target_phys_addr_t addr,
2251db7b5426Sblueswir1                                  unsigned int len)
2252db7b5426Sblueswir1 {
2253db7b5426Sblueswir1     CPUReadMemoryFunc **mem_read;
2254db7b5426Sblueswir1     uint32_t ret;
2255db7b5426Sblueswir1     unsigned int idx;
2256db7b5426Sblueswir1 
2257db7b5426Sblueswir1     idx = SUBPAGE_IDX(addr - mmio->base);
2258db7b5426Sblueswir1 #if defined(DEBUG_SUBPAGE)
2259db7b5426Sblueswir1     printf("%s: subpage %p len %d addr " TARGET_FMT_plx " idx %d\n", __func__,
2260db7b5426Sblueswir1            mmio, len, addr, idx);
2261db7b5426Sblueswir1 #endif
2262db7b5426Sblueswir1     mem_read = mmio->mem_read[idx];
2263db7b5426Sblueswir1     ret = (*mem_read[len])(mmio->opaque[idx], addr);
2264db7b5426Sblueswir1 
2265db7b5426Sblueswir1     return ret;
2266db7b5426Sblueswir1 }
2267db7b5426Sblueswir1 
2268db7b5426Sblueswir1 static inline void subpage_writelen (subpage_t *mmio, target_phys_addr_t addr,
2269db7b5426Sblueswir1                               uint32_t value, unsigned int len)
2270db7b5426Sblueswir1 {
2271db7b5426Sblueswir1     CPUWriteMemoryFunc **mem_write;
2272db7b5426Sblueswir1     unsigned int idx;
2273db7b5426Sblueswir1 
2274db7b5426Sblueswir1     idx = SUBPAGE_IDX(addr - mmio->base);
2275db7b5426Sblueswir1 #if defined(DEBUG_SUBPAGE)
2276db7b5426Sblueswir1     printf("%s: subpage %p len %d addr " TARGET_FMT_plx " idx %d value %08x\n", __func__,
2277db7b5426Sblueswir1            mmio, len, addr, idx, value);
2278db7b5426Sblueswir1 #endif
2279db7b5426Sblueswir1     mem_write = mmio->mem_write[idx];
2280db7b5426Sblueswir1     (*mem_write[len])(mmio->opaque[idx], addr, value);
2281db7b5426Sblueswir1 }
2282db7b5426Sblueswir1 
2283db7b5426Sblueswir1 static uint32_t subpage_readb (void *opaque, target_phys_addr_t addr)
2284db7b5426Sblueswir1 {
2285db7b5426Sblueswir1 #if defined(DEBUG_SUBPAGE)
2286db7b5426Sblueswir1     printf("%s: addr " TARGET_FMT_plx "\n", __func__, addr);
2287db7b5426Sblueswir1 #endif
2288db7b5426Sblueswir1 
2289db7b5426Sblueswir1     return subpage_readlen(opaque, addr, 0);
2290db7b5426Sblueswir1 }
2291db7b5426Sblueswir1 
2292db7b5426Sblueswir1 static void subpage_writeb (void *opaque, target_phys_addr_t addr,
2293db7b5426Sblueswir1                             uint32_t value)
2294db7b5426Sblueswir1 {
2295db7b5426Sblueswir1 #if defined(DEBUG_SUBPAGE)
2296db7b5426Sblueswir1     printf("%s: addr " TARGET_FMT_plx " val %08x\n", __func__, addr, value);
2297db7b5426Sblueswir1 #endif
2298db7b5426Sblueswir1     subpage_writelen(opaque, addr, value, 0);
2299db7b5426Sblueswir1 }
2300db7b5426Sblueswir1 
2301db7b5426Sblueswir1 static uint32_t subpage_readw (void *opaque, target_phys_addr_t addr)
2302db7b5426Sblueswir1 {
2303db7b5426Sblueswir1 #if defined(DEBUG_SUBPAGE)
2304db7b5426Sblueswir1     printf("%s: addr " TARGET_FMT_plx "\n", __func__, addr);
2305db7b5426Sblueswir1 #endif
2306db7b5426Sblueswir1 
2307db7b5426Sblueswir1     return subpage_readlen(opaque, addr, 1);
2308db7b5426Sblueswir1 }
2309db7b5426Sblueswir1 
2310db7b5426Sblueswir1 static void subpage_writew (void *opaque, target_phys_addr_t addr,
2311db7b5426Sblueswir1                             uint32_t value)
2312db7b5426Sblueswir1 {
2313db7b5426Sblueswir1 #if defined(DEBUG_SUBPAGE)
2314db7b5426Sblueswir1     printf("%s: addr " TARGET_FMT_plx " val %08x\n", __func__, addr, value);
2315db7b5426Sblueswir1 #endif
2316db7b5426Sblueswir1     subpage_writelen(opaque, addr, value, 1);
2317db7b5426Sblueswir1 }
2318db7b5426Sblueswir1 
2319db7b5426Sblueswir1 static uint32_t subpage_readl (void *opaque, target_phys_addr_t addr)
2320db7b5426Sblueswir1 {
2321db7b5426Sblueswir1 #if defined(DEBUG_SUBPAGE)
2322db7b5426Sblueswir1     printf("%s: addr " TARGET_FMT_plx "\n", __func__, addr);
2323db7b5426Sblueswir1 #endif
2324db7b5426Sblueswir1 
2325db7b5426Sblueswir1     return subpage_readlen(opaque, addr, 2);
2326db7b5426Sblueswir1 }
2327db7b5426Sblueswir1 
2328db7b5426Sblueswir1 static void subpage_writel (void *opaque,
2329db7b5426Sblueswir1                          target_phys_addr_t addr, uint32_t value)
2330db7b5426Sblueswir1 {
2331db7b5426Sblueswir1 #if defined(DEBUG_SUBPAGE)
2332db7b5426Sblueswir1     printf("%s: addr " TARGET_FMT_plx " val %08x\n", __func__, addr, value);
2333db7b5426Sblueswir1 #endif
2334db7b5426Sblueswir1     subpage_writelen(opaque, addr, value, 2);
2335db7b5426Sblueswir1 }
2336db7b5426Sblueswir1 
/* Size-indexed handler tables registered for every subpage io slot. */
static CPUReadMemoryFunc *subpage_read[] = {
    &subpage_readb,
    &subpage_readw,
    &subpage_readl,
};

static CPUWriteMemoryFunc *subpage_write[] = {
    &subpage_writeb,
    &subpage_writew,
    &subpage_writel,
};
2348db7b5426Sblueswir1 
2349db7b5426Sblueswir1 static int subpage_register (subpage_t *mmio, uint32_t start, uint32_t end,
2350db7b5426Sblueswir1                              int memory)
2351db7b5426Sblueswir1 {
2352db7b5426Sblueswir1     int idx, eidx;
2353db7b5426Sblueswir1 
2354db7b5426Sblueswir1     if (start >= TARGET_PAGE_SIZE || end >= TARGET_PAGE_SIZE)
2355db7b5426Sblueswir1         return -1;
2356db7b5426Sblueswir1     idx = SUBPAGE_IDX(start);
2357db7b5426Sblueswir1     eidx = SUBPAGE_IDX(end);
2358db7b5426Sblueswir1 #if defined(DEBUG_SUBPAGE)
2359db7b5426Sblueswir1     printf("%s: %p start %08x end %08x idx %08x eidx %08x mem %d\n", __func__,
2360db7b5426Sblueswir1            mmio, start, end, idx, eidx, memory);
2361db7b5426Sblueswir1 #endif
2362db7b5426Sblueswir1     memory >>= IO_MEM_SHIFT;
2363db7b5426Sblueswir1     for (; idx <= eidx; idx++) {
2364db7b5426Sblueswir1         mmio->mem_read[idx] = io_mem_read[memory];
2365db7b5426Sblueswir1         mmio->mem_write[idx] = io_mem_write[memory];
2366db7b5426Sblueswir1         mmio->opaque[idx] = io_mem_opaque[memory];
2367db7b5426Sblueswir1     }
2368db7b5426Sblueswir1 
2369db7b5426Sblueswir1     return 0;
2370db7b5426Sblueswir1 }
2371db7b5426Sblueswir1 
/* Create a subpage container for the target page at 'base', store its new
   io descriptor in *phys, and initially map the whole page back to
   'orig_memory'.  Returns NULL when allocation fails (callers must cope). */
static void *subpage_init (target_phys_addr_t base, uint32_t *phys,
                           int orig_memory)
{
    subpage_t *mmio;
    int subpage_memory;

    mmio = qemu_mallocz(sizeof(subpage_t));
    if (mmio != NULL) {
        mmio->base = base;
        /* NOTE(review): cpu_register_io_memory returns -1 on table
           overflow; that value is stored into *phys unchecked here */
        subpage_memory = cpu_register_io_memory(0, subpage_read, subpage_write, mmio);
#if defined(DEBUG_SUBPAGE)
        printf("%s: %p base " TARGET_FMT_plx " len %08x %d\n", __func__,
               mmio, base, TARGET_PAGE_SIZE, subpage_memory);
#endif
        *phys = subpage_memory | IO_MEM_SUBPAGE;
        subpage_register(mmio, 0, TARGET_PAGE_SIZE - 1, orig_memory);
    }

    return mmio;
}
2392db7b5426Sblueswir1 
/* Register the fixed io slots (ROM, unassigned, notdirty), the optional
   watchpoint slot, and allocate the dirty bitmap. */
static void io_mem_init(void)
{
    cpu_register_io_memory(IO_MEM_ROM >> IO_MEM_SHIFT, error_mem_read, unassigned_mem_write, NULL);
    cpu_register_io_memory(IO_MEM_UNASSIGNED >> IO_MEM_SHIFT, unassigned_mem_read, unassigned_mem_write, NULL);
    cpu_register_io_memory(IO_MEM_NOTDIRTY >> IO_MEM_SHIFT, error_mem_read, notdirty_mem_write, NULL);
    /* NOTE(review): only three fixed slots are registered here but the
       next free index is forced to 5 — presumably reserving entries for
       other IO_MEM_* constants; confirm against their definitions. */
    io_mem_nb = 5;

#if defined(CONFIG_SOFTMMU)
    /* -1 asks cpu_register_io_memory to allocate a fresh slot */
    io_mem_watch = cpu_register_io_memory(-1, watch_mem_read,
                                          watch_mem_write, NULL);
#endif
    /* alloc dirty bits array */
    phys_ram_dirty = qemu_vmalloc(phys_ram_size >> TARGET_PAGE_BITS);
    /* start with every page marked fully dirty */
    memset(phys_ram_dirty, 0xff, phys_ram_size >> TARGET_PAGE_BITS);
}
240833417e70Sbellard 
240933417e70Sbellard /* mem_read and mem_write are arrays of functions containing the
241033417e70Sbellard    function to access byte (index 0), word (index 1) and dword (index
241133417e70Sbellard    2). All functions must be supplied. If io_index is non zero, the
241233417e70Sbellard    corresponding io zone is modified. If it is zero, a new io zone is
241333417e70Sbellard    allocated. The return value can be used with
241433417e70Sbellard    cpu_register_physical_memory(). (-1) is returned if error. */
241533417e70Sbellard int cpu_register_io_memory(int io_index,
241633417e70Sbellard                            CPUReadMemoryFunc **mem_read,
2417a4193c8aSbellard                            CPUWriteMemoryFunc **mem_write,
2418a4193c8aSbellard                            void *opaque)
241933417e70Sbellard {
242033417e70Sbellard     int i;
242133417e70Sbellard 
242233417e70Sbellard     if (io_index <= 0) {
2423b5ff1b31Sbellard         if (io_mem_nb >= IO_MEM_NB_ENTRIES)
242433417e70Sbellard             return -1;
242533417e70Sbellard         io_index = io_mem_nb++;
242633417e70Sbellard     } else {
242733417e70Sbellard         if (io_index >= IO_MEM_NB_ENTRIES)
242833417e70Sbellard             return -1;
242933417e70Sbellard     }
243033417e70Sbellard 
243133417e70Sbellard     for(i = 0;i < 3; i++) {
243233417e70Sbellard         io_mem_read[io_index][i] = mem_read[i];
243333417e70Sbellard         io_mem_write[io_index][i] = mem_write[i];
243433417e70Sbellard     }
2435a4193c8aSbellard     io_mem_opaque[io_index] = opaque;
243633417e70Sbellard     return io_index << IO_MEM_SHIFT;
243733417e70Sbellard }
243861382a50Sbellard 
24398926b517Sbellard CPUWriteMemoryFunc **cpu_get_io_memory_write(int io_index)
24408926b517Sbellard {
24418926b517Sbellard     return io_mem_write[io_index >> IO_MEM_SHIFT];
24428926b517Sbellard }
24438926b517Sbellard 
24448926b517Sbellard CPUReadMemoryFunc **cpu_get_io_memory_read(int io_index)
24458926b517Sbellard {
24468926b517Sbellard     return io_mem_read[io_index >> IO_MEM_SHIFT];
24478926b517Sbellard }
24488926b517Sbellard 
244913eb76e0Sbellard /* physical memory access (slow version, mainly for debug) */
245013eb76e0Sbellard #if defined(CONFIG_USER_ONLY)
24512e12669aSbellard void cpu_physical_memory_rw(target_phys_addr_t addr, uint8_t *buf,
245213eb76e0Sbellard                             int len, int is_write)
245313eb76e0Sbellard {
245413eb76e0Sbellard     int l, flags;
245513eb76e0Sbellard     target_ulong page;
245653a5960aSpbrook     void * p;
245713eb76e0Sbellard 
245813eb76e0Sbellard     while (len > 0) {
245913eb76e0Sbellard         page = addr & TARGET_PAGE_MASK;
246013eb76e0Sbellard         l = (page + TARGET_PAGE_SIZE) - addr;
246113eb76e0Sbellard         if (l > len)
246213eb76e0Sbellard             l = len;
246313eb76e0Sbellard         flags = page_get_flags(page);
246413eb76e0Sbellard         if (!(flags & PAGE_VALID))
246513eb76e0Sbellard             return;
246613eb76e0Sbellard         if (is_write) {
246713eb76e0Sbellard             if (!(flags & PAGE_WRITE))
246813eb76e0Sbellard                 return;
246953a5960aSpbrook             p = lock_user(addr, len, 0);
247053a5960aSpbrook             memcpy(p, buf, len);
247153a5960aSpbrook             unlock_user(p, addr, len);
247213eb76e0Sbellard         } else {
247313eb76e0Sbellard             if (!(flags & PAGE_READ))
247413eb76e0Sbellard                 return;
247553a5960aSpbrook             p = lock_user(addr, len, 1);
247653a5960aSpbrook             memcpy(buf, p, len);
247753a5960aSpbrook             unlock_user(p, addr, 0);
247813eb76e0Sbellard         }
247913eb76e0Sbellard         len -= l;
248013eb76e0Sbellard         buf += l;
248113eb76e0Sbellard         addr += l;
248213eb76e0Sbellard     }
248313eb76e0Sbellard }
24848df1cd07Sbellard 
248513eb76e0Sbellard #else
/* Copy 'len' bytes between 'buf' and guest physical memory starting at
   'addr'.  is_write != 0 copies buf -> guest, otherwise guest -> buf.
   Each page-sized piece is dispatched either to the registered io_mem
   handlers (MMIO / ROM) or copied directly through phys_ram_base for
   plain RAM.  RAM writes invalidate any translated code on the page
   and mark it dirty. */
void cpu_physical_memory_rw(target_phys_addr_t addr, uint8_t *buf,
                            int len, int is_write)
{
    int l, io_index;
    uint8_t *ptr;
    uint32_t val;
    target_phys_addr_t page;
    unsigned long pd;
    PhysPageDesc *p;

    while (len > 0) {
        page = addr & TARGET_PAGE_MASK;
        /* clamp this transfer to the end of the current target page */
        l = (page + TARGET_PAGE_SIZE) - addr;
        if (l > len)
            l = len;
        /* unmapped pages fall back to the unassigned I/O slot */
        p = phys_page_find(page >> TARGET_PAGE_BITS);
        if (!p) {
            pd = IO_MEM_UNASSIGNED;
        } else {
            pd = p->phys_offset;
        }

        if (is_write) {
            if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
                /* not plain RAM: go through the I/O handlers, using the
                   widest naturally-aligned access that fits */
                io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
                /* XXX: could force cpu_single_env to NULL to avoid
                   potential bugs */
                if (l >= 4 && ((addr & 3) == 0)) {
                    /* 32 bit write access */
                    val = ldl_p(buf);
                    io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
                    l = 4;
                } else if (l >= 2 && ((addr & 1) == 0)) {
                    /* 16 bit write access */
                    val = lduw_p(buf);
                    io_mem_write[io_index][1](io_mem_opaque[io_index], addr, val);
                    l = 2;
                } else {
                    /* 8 bit write access */
                    val = ldub_p(buf);
                    io_mem_write[io_index][0](io_mem_opaque[io_index], addr, val);
                    l = 1;
                }
            } else {
                unsigned long addr1;
                addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
                /* RAM case */
                ptr = phys_ram_base + addr1;
                memcpy(ptr, buf, l);
                if (!cpu_physical_memory_is_dirty(addr1)) {
                    /* invalidate code */
                    tb_invalidate_phys_page_range(addr1, addr1 + l, 0);
                    /* set dirty bit: all flags except CODE_DIRTY_FLAG */
                    phys_ram_dirty[addr1 >> TARGET_PAGE_BITS] |=
                        (0xff & ~CODE_DIRTY_FLAG);
                }
            }
        } else {
            if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
                !(pd & IO_MEM_ROMD)) {
                /* I/O case */
                io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
                if (l >= 4 && ((addr & 3) == 0)) {
                    /* 32 bit read access */
                    val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr);
                    stl_p(buf, val);
                    l = 4;
                } else if (l >= 2 && ((addr & 1) == 0)) {
                    /* 16 bit read access */
                    val = io_mem_read[io_index][1](io_mem_opaque[io_index], addr);
                    stw_p(buf, val);
                    l = 2;
                } else {
                    /* 8 bit read access */
                    val = io_mem_read[io_index][0](io_mem_opaque[io_index], addr);
                    stb_p(buf, val);
                    l = 1;
                }
            } else {
                /* RAM (or ROM/ROMD) case: direct host memory read */
                ptr = phys_ram_base + (pd & TARGET_PAGE_MASK) +
                    (addr & ~TARGET_PAGE_MASK);
                memcpy(buf, ptr, l);
            }
        }
        len -= l;
        buf += l;
        addr += l;
    }
}
25768df1cd07Sbellard 
2577d0ecd2aaSbellard /* used for ROM loading : can write in RAM and ROM */
2578d0ecd2aaSbellard void cpu_physical_memory_write_rom(target_phys_addr_t addr,
2579d0ecd2aaSbellard                                    const uint8_t *buf, int len)
2580d0ecd2aaSbellard {
2581d0ecd2aaSbellard     int l;
2582d0ecd2aaSbellard     uint8_t *ptr;
2583d0ecd2aaSbellard     target_phys_addr_t page;
2584d0ecd2aaSbellard     unsigned long pd;
2585d0ecd2aaSbellard     PhysPageDesc *p;
2586d0ecd2aaSbellard 
2587d0ecd2aaSbellard     while (len > 0) {
2588d0ecd2aaSbellard         page = addr & TARGET_PAGE_MASK;
2589d0ecd2aaSbellard         l = (page + TARGET_PAGE_SIZE) - addr;
2590d0ecd2aaSbellard         if (l > len)
2591d0ecd2aaSbellard             l = len;
2592d0ecd2aaSbellard         p = phys_page_find(page >> TARGET_PAGE_BITS);
2593d0ecd2aaSbellard         if (!p) {
2594d0ecd2aaSbellard             pd = IO_MEM_UNASSIGNED;
2595d0ecd2aaSbellard         } else {
2596d0ecd2aaSbellard             pd = p->phys_offset;
2597d0ecd2aaSbellard         }
2598d0ecd2aaSbellard 
2599d0ecd2aaSbellard         if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM &&
26002a4188a3Sbellard             (pd & ~TARGET_PAGE_MASK) != IO_MEM_ROM &&
26012a4188a3Sbellard             !(pd & IO_MEM_ROMD)) {
2602d0ecd2aaSbellard             /* do nothing */
2603d0ecd2aaSbellard         } else {
2604d0ecd2aaSbellard             unsigned long addr1;
2605d0ecd2aaSbellard             addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
2606d0ecd2aaSbellard             /* ROM/RAM case */
2607d0ecd2aaSbellard             ptr = phys_ram_base + addr1;
2608d0ecd2aaSbellard             memcpy(ptr, buf, l);
2609d0ecd2aaSbellard         }
2610d0ecd2aaSbellard         len -= l;
2611d0ecd2aaSbellard         buf += l;
2612d0ecd2aaSbellard         addr += l;
2613d0ecd2aaSbellard     }
2614d0ecd2aaSbellard }
2615d0ecd2aaSbellard 
2616d0ecd2aaSbellard 
26178df1cd07Sbellard /* warning: addr must be aligned */
26188df1cd07Sbellard uint32_t ldl_phys(target_phys_addr_t addr)
26198df1cd07Sbellard {
26208df1cd07Sbellard     int io_index;
26218df1cd07Sbellard     uint8_t *ptr;
26228df1cd07Sbellard     uint32_t val;
26238df1cd07Sbellard     unsigned long pd;
26248df1cd07Sbellard     PhysPageDesc *p;
26258df1cd07Sbellard 
26268df1cd07Sbellard     p = phys_page_find(addr >> TARGET_PAGE_BITS);
26278df1cd07Sbellard     if (!p) {
26288df1cd07Sbellard         pd = IO_MEM_UNASSIGNED;
26298df1cd07Sbellard     } else {
26308df1cd07Sbellard         pd = p->phys_offset;
26318df1cd07Sbellard     }
26328df1cd07Sbellard 
26332a4188a3Sbellard     if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
26342a4188a3Sbellard         !(pd & IO_MEM_ROMD)) {
26358df1cd07Sbellard         /* I/O case */
26368df1cd07Sbellard         io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
26378df1cd07Sbellard         val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr);
26388df1cd07Sbellard     } else {
26398df1cd07Sbellard         /* RAM case */
26408df1cd07Sbellard         ptr = phys_ram_base + (pd & TARGET_PAGE_MASK) +
26418df1cd07Sbellard             (addr & ~TARGET_PAGE_MASK);
26428df1cd07Sbellard         val = ldl_p(ptr);
26438df1cd07Sbellard     }
26448df1cd07Sbellard     return val;
26458df1cd07Sbellard }
26468df1cd07Sbellard 
/* Load a 64 bit value from guest physical memory.
   warning: addr must be aligned */
uint64_t ldq_phys(target_phys_addr_t addr)
{
    int io_index;
    uint8_t *ptr;
    uint64_t val;
    unsigned long pd;
    PhysPageDesc *p;

    /* unmapped pages fall back to the unassigned I/O slot */
    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    if (!p) {
        pd = IO_MEM_UNASSIGNED;
    } else {
        pd = p->phys_offset;
    }

    if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
        !(pd & IO_MEM_ROMD)) {
        /* I/O case: there is no 64 bit handler slot, so the access is
           split into two 32 bit reads, ordered per target endianness */
        io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
#ifdef TARGET_WORDS_BIGENDIAN
        val = (uint64_t)io_mem_read[io_index][2](io_mem_opaque[io_index], addr) << 32;
        val |= io_mem_read[io_index][2](io_mem_opaque[io_index], addr + 4);
#else
        val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr);
        val |= (uint64_t)io_mem_read[io_index][2](io_mem_opaque[io_index], addr + 4) << 32;
#endif
    } else {
        /* RAM/ROM case: read directly from host memory */
        ptr = phys_ram_base + (pd & TARGET_PAGE_MASK) +
            (addr & ~TARGET_PAGE_MASK);
        val = ldq_p(ptr);
    }
    return val;
}
268284b7b8e7Sbellard 
2683aab33094Sbellard /* XXX: optimize */
2684aab33094Sbellard uint32_t ldub_phys(target_phys_addr_t addr)
2685aab33094Sbellard {
2686aab33094Sbellard     uint8_t val;
2687aab33094Sbellard     cpu_physical_memory_read(addr, &val, 1);
2688aab33094Sbellard     return val;
2689aab33094Sbellard }
2690aab33094Sbellard 
2691aab33094Sbellard /* XXX: optimize */
2692aab33094Sbellard uint32_t lduw_phys(target_phys_addr_t addr)
2693aab33094Sbellard {
2694aab33094Sbellard     uint16_t val;
2695aab33094Sbellard     cpu_physical_memory_read(addr, (uint8_t *)&val, 2);
2696aab33094Sbellard     return tswap16(val);
2697aab33094Sbellard }
2698aab33094Sbellard 
26998df1cd07Sbellard /* warning: addr must be aligned. The ram page is not masked as dirty
27008df1cd07Sbellard    and the code inside is not invalidated. It is useful if the dirty
27018df1cd07Sbellard    bits are used to track modified PTEs */
27028df1cd07Sbellard void stl_phys_notdirty(target_phys_addr_t addr, uint32_t val)
27038df1cd07Sbellard {
27048df1cd07Sbellard     int io_index;
27058df1cd07Sbellard     uint8_t *ptr;
27068df1cd07Sbellard     unsigned long pd;
27078df1cd07Sbellard     PhysPageDesc *p;
27088df1cd07Sbellard 
27098df1cd07Sbellard     p = phys_page_find(addr >> TARGET_PAGE_BITS);
27108df1cd07Sbellard     if (!p) {
27118df1cd07Sbellard         pd = IO_MEM_UNASSIGNED;
27128df1cd07Sbellard     } else {
27138df1cd07Sbellard         pd = p->phys_offset;
27148df1cd07Sbellard     }
27158df1cd07Sbellard 
27163a7d929eSbellard     if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
27178df1cd07Sbellard         io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
27188df1cd07Sbellard         io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
27198df1cd07Sbellard     } else {
27208df1cd07Sbellard         ptr = phys_ram_base + (pd & TARGET_PAGE_MASK) +
27218df1cd07Sbellard             (addr & ~TARGET_PAGE_MASK);
27228df1cd07Sbellard         stl_p(ptr, val);
27238df1cd07Sbellard     }
27248df1cd07Sbellard }
27258df1cd07Sbellard 
/* 64 bit version of stl_phys_notdirty: the ram page is not marked
   dirty and translated code on it is not invalidated (useful when the
   dirty bits track modified PTEs).  warning: addr must be aligned */
void stq_phys_notdirty(target_phys_addr_t addr, uint64_t val)
{
    int io_index;
    uint8_t *ptr;
    unsigned long pd;
    PhysPageDesc *p;

    /* unmapped pages fall back to the unassigned I/O slot */
    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    if (!p) {
        pd = IO_MEM_UNASSIGNED;
    } else {
        pd = p->phys_offset;
    }

    if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
        /* not plain RAM: no 64 bit handler slot exists, so split into
           two 32 bit I/O writes ordered per target endianness */
        io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
#ifdef TARGET_WORDS_BIGENDIAN
        io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val >> 32);
        io_mem_write[io_index][2](io_mem_opaque[io_index], addr + 4, val);
#else
        io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
        io_mem_write[io_index][2](io_mem_opaque[io_index], addr + 4, val >> 32);
#endif
    } else {
        /* RAM case: direct host store, dirty bitmap left untouched */
        ptr = phys_ram_base + (pd & TARGET_PAGE_MASK) +
            (addr & ~TARGET_PAGE_MASK);
        stq_p(ptr, val);
    }
}
2755bc98a7efSj_mayer 
/* Store a 32 bit value to guest physical memory with full dirty
   tracking: a RAM store invalidates any translated code on the page
   and marks it dirty.  warning: addr must be aligned */
void stl_phys(target_phys_addr_t addr, uint32_t val)
{
    int io_index;
    uint8_t *ptr;
    unsigned long pd;
    PhysPageDesc *p;

    /* unmapped pages fall back to the unassigned I/O slot */
    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    if (!p) {
        pd = IO_MEM_UNASSIGNED;
    } else {
        pd = p->phys_offset;
    }

    if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
        /* not plain RAM: dispatch to the 32 bit I/O write handler */
        io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
        io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
    } else {
        unsigned long addr1;
        addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
        /* RAM case */
        ptr = phys_ram_base + addr1;
        stl_p(ptr, val);
        if (!cpu_physical_memory_is_dirty(addr1)) {
            /* invalidate code */
            tb_invalidate_phys_page_range(addr1, addr1 + 4, 0);
            /* set dirty bit: all flags except CODE_DIRTY_FLAG */
            phys_ram_dirty[addr1 >> TARGET_PAGE_BITS] |=
                (0xff & ~CODE_DIRTY_FLAG);
        }
    }
}
27898df1cd07Sbellard 
2790aab33094Sbellard /* XXX: optimize */
2791aab33094Sbellard void stb_phys(target_phys_addr_t addr, uint32_t val)
2792aab33094Sbellard {
2793aab33094Sbellard     uint8_t v = val;
2794aab33094Sbellard     cpu_physical_memory_write(addr, &v, 1);
2795aab33094Sbellard }
2796aab33094Sbellard 
2797aab33094Sbellard /* XXX: optimize */
2798aab33094Sbellard void stw_phys(target_phys_addr_t addr, uint32_t val)
2799aab33094Sbellard {
2800aab33094Sbellard     uint16_t v = tswap16(val);
2801aab33094Sbellard     cpu_physical_memory_write(addr, (const uint8_t *)&v, 2);
2802aab33094Sbellard }
2803aab33094Sbellard 
2804aab33094Sbellard /* XXX: optimize */
2805aab33094Sbellard void stq_phys(target_phys_addr_t addr, uint64_t val)
2806aab33094Sbellard {
2807aab33094Sbellard     val = tswap64(val);
2808aab33094Sbellard     cpu_physical_memory_write(addr, (const uint8_t *)&val, 8);
2809aab33094Sbellard }
2810aab33094Sbellard 
281113eb76e0Sbellard #endif
281213eb76e0Sbellard 
281313eb76e0Sbellard /* virtual memory access for debug */
2814b448f2f3Sbellard int cpu_memory_rw_debug(CPUState *env, target_ulong addr,
2815b448f2f3Sbellard                         uint8_t *buf, int len, int is_write)
281613eb76e0Sbellard {
281713eb76e0Sbellard     int l;
28189b3c35e0Sj_mayer     target_phys_addr_t phys_addr;
28199b3c35e0Sj_mayer     target_ulong page;
282013eb76e0Sbellard 
282113eb76e0Sbellard     while (len > 0) {
282213eb76e0Sbellard         page = addr & TARGET_PAGE_MASK;
282313eb76e0Sbellard         phys_addr = cpu_get_phys_page_debug(env, page);
282413eb76e0Sbellard         /* if no physical page mapped, return an error */
282513eb76e0Sbellard         if (phys_addr == -1)
282613eb76e0Sbellard             return -1;
282713eb76e0Sbellard         l = (page + TARGET_PAGE_SIZE) - addr;
282813eb76e0Sbellard         if (l > len)
282913eb76e0Sbellard             l = len;
2830b448f2f3Sbellard         cpu_physical_memory_rw(phys_addr + (addr & ~TARGET_PAGE_MASK),
2831b448f2f3Sbellard                                buf, l, is_write);
283213eb76e0Sbellard         len -= l;
283313eb76e0Sbellard         buf += l;
283413eb76e0Sbellard         addr += l;
283513eb76e0Sbellard     }
283613eb76e0Sbellard     return 0;
283713eb76e0Sbellard }
283813eb76e0Sbellard 
2839e3db7226Sbellard void dump_exec_info(FILE *f,
2840e3db7226Sbellard                     int (*cpu_fprintf)(FILE *f, const char *fmt, ...))
2841e3db7226Sbellard {
2842e3db7226Sbellard     int i, target_code_size, max_target_code_size;
2843e3db7226Sbellard     int direct_jmp_count, direct_jmp2_count, cross_page;
2844e3db7226Sbellard     TranslationBlock *tb;
2845e3db7226Sbellard 
2846e3db7226Sbellard     target_code_size = 0;
2847e3db7226Sbellard     max_target_code_size = 0;
2848e3db7226Sbellard     cross_page = 0;
2849e3db7226Sbellard     direct_jmp_count = 0;
2850e3db7226Sbellard     direct_jmp2_count = 0;
2851e3db7226Sbellard     for(i = 0; i < nb_tbs; i++) {
2852e3db7226Sbellard         tb = &tbs[i];
2853e3db7226Sbellard         target_code_size += tb->size;
2854e3db7226Sbellard         if (tb->size > max_target_code_size)
2855e3db7226Sbellard             max_target_code_size = tb->size;
2856e3db7226Sbellard         if (tb->page_addr[1] != -1)
2857e3db7226Sbellard             cross_page++;
2858e3db7226Sbellard         if (tb->tb_next_offset[0] != 0xffff) {
2859e3db7226Sbellard             direct_jmp_count++;
2860e3db7226Sbellard             if (tb->tb_next_offset[1] != 0xffff) {
2861e3db7226Sbellard                 direct_jmp2_count++;
2862e3db7226Sbellard             }
2863e3db7226Sbellard         }
2864e3db7226Sbellard     }
2865e3db7226Sbellard     /* XXX: avoid using doubles ? */
2866e3db7226Sbellard     cpu_fprintf(f, "TB count            %d\n", nb_tbs);
2867e3db7226Sbellard     cpu_fprintf(f, "TB avg target size  %d max=%d bytes\n",
2868e3db7226Sbellard                 nb_tbs ? target_code_size / nb_tbs : 0,
2869e3db7226Sbellard                 max_target_code_size);
2870e3db7226Sbellard     cpu_fprintf(f, "TB avg host size    %d bytes (expansion ratio: %0.1f)\n",
2871e3db7226Sbellard                 nb_tbs ? (code_gen_ptr - code_gen_buffer) / nb_tbs : 0,
2872e3db7226Sbellard                 target_code_size ? (double) (code_gen_ptr - code_gen_buffer) / target_code_size : 0);
2873e3db7226Sbellard     cpu_fprintf(f, "cross page TB count %d (%d%%)\n",
2874e3db7226Sbellard             cross_page,
2875e3db7226Sbellard             nb_tbs ? (cross_page * 100) / nb_tbs : 0);
2876e3db7226Sbellard     cpu_fprintf(f, "direct jump count   %d (%d%%) (2 jumps=%d %d%%)\n",
2877e3db7226Sbellard                 direct_jmp_count,
2878e3db7226Sbellard                 nb_tbs ? (direct_jmp_count * 100) / nb_tbs : 0,
2879e3db7226Sbellard                 direct_jmp2_count,
2880e3db7226Sbellard                 nb_tbs ? (direct_jmp2_count * 100) / nb_tbs : 0);
2881e3db7226Sbellard     cpu_fprintf(f, "TB flush count      %d\n", tb_flush_count);
2882e3db7226Sbellard     cpu_fprintf(f, "TB invalidate count %d\n", tb_phys_invalidate_count);
2883e3db7226Sbellard     cpu_fprintf(f, "TLB flush count     %d\n", tlb_flush_count);
2884e3db7226Sbellard }
2885e3db7226Sbellard 
#if !defined(CONFIG_USER_ONLY)

/* Instantiate the "_cmmu" variants of the softmmu helpers by expanding
   softmmu_template.h once per access size (SHIFT 0..3 -> 8/16/32/64
   bit).  GETPC() is NULL because these helpers are not called from
   generated code, and 'env' is temporarily aliased to cpu_single_env
   for the duration of the expansions (undone below).
   NOTE(review): SOFTMMU_CODE_ACCESS presumably selects the
   instruction-fetch path inside the template -- confirm against
   softmmu_template.h. */
#define MMUSUFFIX _cmmu
#define GETPC() NULL
#define env cpu_single_env
#define SOFTMMU_CODE_ACCESS

#define SHIFT 0
#include "softmmu_template.h"

#define SHIFT 1
#include "softmmu_template.h"

#define SHIFT 2
#include "softmmu_template.h"

#define SHIFT 3
#include "softmmu_template.h"

/* restore the real 'env' identifier */
#undef env

#endif
2908