xref: /qemu/system/physmem.c (revision ca10f86763f58b7b3667e2ca7d26db3dc810eb20)
154936004Sbellard /*
2fd6ce8f6Sbellard  *  virtual page mapping and translated block handling
354936004Sbellard  *
454936004Sbellard  *  Copyright (c) 2003 Fabrice Bellard
554936004Sbellard  *
654936004Sbellard  * This library is free software; you can redistribute it and/or
754936004Sbellard  * modify it under the terms of the GNU Lesser General Public
854936004Sbellard  * License as published by the Free Software Foundation; either
954936004Sbellard  * version 2 of the License, or (at your option) any later version.
1054936004Sbellard  *
1154936004Sbellard  * This library is distributed in the hope that it will be useful,
1254936004Sbellard  * but WITHOUT ANY WARRANTY; without even the implied warranty of
1354936004Sbellard  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
1454936004Sbellard  * Lesser General Public License for more details.
1554936004Sbellard  *
1654936004Sbellard  * You should have received a copy of the GNU Lesser General Public
1754936004Sbellard  * License along with this library; if not, write to the Free Software
1854936004Sbellard  * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
1954936004Sbellard  */
2067b915a5Sbellard #include "config.h"
21d5a8f07cSbellard #ifdef _WIN32
224fddf62aSths #define WIN32_LEAN_AND_MEAN
23d5a8f07cSbellard #include <windows.h>
24d5a8f07cSbellard #else
25a98d49b1Sbellard #include <sys/types.h>
26d5a8f07cSbellard #include <sys/mman.h>
27d5a8f07cSbellard #endif
2854936004Sbellard #include <stdlib.h>
2954936004Sbellard #include <stdio.h>
3054936004Sbellard #include <stdarg.h>
3154936004Sbellard #include <string.h>
3254936004Sbellard #include <errno.h>
3354936004Sbellard #include <unistd.h>
3454936004Sbellard #include <inttypes.h>
3554936004Sbellard 
366180a181Sbellard #include "cpu.h"
376180a181Sbellard #include "exec-all.h"
38ca10f867Saurel32 #include "qemu-common.h"
3953a5960aSpbrook #if defined(CONFIG_USER_ONLY)
4053a5960aSpbrook #include <qemu.h>
4153a5960aSpbrook #endif
4254936004Sbellard 
43fd6ce8f6Sbellard //#define DEBUG_TB_INVALIDATE
4466e85a21Sbellard //#define DEBUG_FLUSH
459fa3e853Sbellard //#define DEBUG_TLB
4667d3b957Spbrook //#define DEBUG_UNASSIGNED
47fd6ce8f6Sbellard 
48fd6ce8f6Sbellard /* make various TB consistency checks */
49fd6ce8f6Sbellard //#define DEBUG_TB_CHECK
5098857888Sbellard //#define DEBUG_TLB_CHECK
51fd6ce8f6Sbellard 
521196be37Sths //#define DEBUG_IOPORT
53db7b5426Sblueswir1 //#define DEBUG_SUBPAGE
541196be37Sths 
5599773bd4Spbrook #if !defined(CONFIG_USER_ONLY)
5699773bd4Spbrook /* TB consistency checks only implemented for usermode emulation.  */
5799773bd4Spbrook #undef DEBUG_TB_CHECK
5899773bd4Spbrook #endif
5999773bd4Spbrook 
60fd6ce8f6Sbellard /* threshold to flush the translated code buffer */
61d07bde88Sblueswir1 #define CODE_GEN_BUFFER_MAX_SIZE (CODE_GEN_BUFFER_SIZE - code_gen_max_block_size())
62fd6ce8f6Sbellard 
639fa3e853Sbellard #define SMC_BITMAP_USE_THRESHOLD 10
649fa3e853Sbellard 
659fa3e853Sbellard #define MMAP_AREA_START        0x00000000
669fa3e853Sbellard #define MMAP_AREA_END          0xa8000000
67fd6ce8f6Sbellard 
68108c49b8Sbellard #if defined(TARGET_SPARC64)
69108c49b8Sbellard #define TARGET_PHYS_ADDR_SPACE_BITS 41
705dcb6b91Sblueswir1 #elif defined(TARGET_SPARC)
715dcb6b91Sblueswir1 #define TARGET_PHYS_ADDR_SPACE_BITS 36
72bedb69eaSj_mayer #elif defined(TARGET_ALPHA)
73bedb69eaSj_mayer #define TARGET_PHYS_ADDR_SPACE_BITS 42
74bedb69eaSj_mayer #define TARGET_VIRT_ADDR_SPACE_BITS 42
75108c49b8Sbellard #elif defined(TARGET_PPC64)
76108c49b8Sbellard #define TARGET_PHYS_ADDR_SPACE_BITS 42
77108c49b8Sbellard #else
78108c49b8Sbellard /* Note: for compatibility with kqemu, we use 32 bits for x86_64 */
79108c49b8Sbellard #define TARGET_PHYS_ADDR_SPACE_BITS 32
80108c49b8Sbellard #endif
81108c49b8Sbellard 
82fd6ce8f6Sbellard TranslationBlock tbs[CODE_GEN_MAX_BLOCKS];
839fa3e853Sbellard TranslationBlock *tb_phys_hash[CODE_GEN_PHYS_HASH_SIZE];
84fd6ce8f6Sbellard int nb_tbs;
85eb51d102Sbellard /* any access to the tbs or the page table must use this lock */
86eb51d102Sbellard spinlock_t tb_lock = SPIN_LOCK_UNLOCKED;
87fd6ce8f6Sbellard 
88b8076a74Sbellard uint8_t code_gen_buffer[CODE_GEN_BUFFER_SIZE] __attribute__((aligned (32)));
89fd6ce8f6Sbellard uint8_t *code_gen_ptr;
90fd6ce8f6Sbellard 
919fa3e853Sbellard int phys_ram_size;
929fa3e853Sbellard int phys_ram_fd;
939fa3e853Sbellard uint8_t *phys_ram_base;
941ccde1cbSbellard uint8_t *phys_ram_dirty;
95e9a1ab19Sbellard static ram_addr_t phys_ram_alloc_offset = 0;
969fa3e853Sbellard 
976a00d601Sbellard CPUState *first_cpu;
986a00d601Sbellard /* current CPU in the current thread. It is only valid inside
996a00d601Sbellard    cpu_exec() */
1006a00d601Sbellard CPUState *cpu_single_env;
1016a00d601Sbellard 
/* Per-target-page descriptor used by the translator to track the
   translated blocks (TBs) that intersect a given guest page. */
typedef struct PageDesc {
    /* list of TBs intersecting this ram page */
    TranslationBlock *first_tb;
    /* in order to optimize self modifying code, we count the number
       of lookups we do to a given page to use a bitmap */
    unsigned int code_write_count;
    /* one bit per byte of the page, set where translated code lives;
       built lazily by build_page_bitmap() */
    uint8_t *code_bitmap;
#if defined(CONFIG_USER_ONLY)
    /* PAGE_* protection flags for user-mode emulation */
    unsigned long flags;
#endif
} PageDesc;
11354936004Sbellard 
/* Descriptor for one guest physical page. */
typedef struct PhysPageDesc {
    /* offset in host memory of the page + io_index in the low 12 bits */
    uint32_t phys_offset;
} PhysPageDesc;
11892e873b9Sbellard 
11954936004Sbellard #define L2_BITS 10
120bedb69eaSj_mayer #if defined(CONFIG_USER_ONLY) && defined(TARGET_VIRT_ADDR_SPACE_BITS)
121bedb69eaSj_mayer /* XXX: this is a temporary hack for alpha target.
122bedb69eaSj_mayer  *      In the future, this is to be replaced by a multi-level table
123bedb69eaSj_mayer  *      to actually be able to handle the complete 64 bits address space.
124bedb69eaSj_mayer  */
125bedb69eaSj_mayer #define L1_BITS (TARGET_VIRT_ADDR_SPACE_BITS - L2_BITS - TARGET_PAGE_BITS)
126bedb69eaSj_mayer #else
12754936004Sbellard #define L1_BITS (32 - L2_BITS - TARGET_PAGE_BITS)
128bedb69eaSj_mayer #endif
12954936004Sbellard 
13054936004Sbellard #define L1_SIZE (1 << L1_BITS)
13154936004Sbellard #define L2_SIZE (1 << L2_BITS)
13254936004Sbellard 
13333417e70Sbellard static void io_mem_init(void);
134fd6ce8f6Sbellard 
13583fb7adfSbellard unsigned long qemu_real_host_page_size;
13683fb7adfSbellard unsigned long qemu_host_page_bits;
13783fb7adfSbellard unsigned long qemu_host_page_size;
13883fb7adfSbellard unsigned long qemu_host_page_mask;
13954936004Sbellard 
14092e873b9Sbellard /* XXX: for system emulation, it could just be an array */
14154936004Sbellard static PageDesc *l1_map[L1_SIZE];
1420a962c02Sbellard PhysPageDesc **l1_phys_map;
14354936004Sbellard 
14433417e70Sbellard /* io memory support */
14533417e70Sbellard CPUWriteMemoryFunc *io_mem_write[IO_MEM_NB_ENTRIES][4];
14633417e70Sbellard CPUReadMemoryFunc *io_mem_read[IO_MEM_NB_ENTRIES][4];
147a4193c8aSbellard void *io_mem_opaque[IO_MEM_NB_ENTRIES];
14833417e70Sbellard static int io_mem_nb;
1496658ffb8Spbrook #if defined(CONFIG_SOFTMMU)
1506658ffb8Spbrook static int io_mem_watch;
1516658ffb8Spbrook #endif
15233417e70Sbellard 
15334865134Sbellard /* log support */
15434865134Sbellard char *logfilename = "/tmp/qemu.log";
15534865134Sbellard FILE *logfile;
15634865134Sbellard int loglevel;
157e735b91cSpbrook static int log_append = 0;
15834865134Sbellard 
159e3db7226Sbellard /* statistics */
160e3db7226Sbellard static int tlb_flush_count;
161e3db7226Sbellard static int tb_flush_count;
162e3db7226Sbellard static int tb_phys_invalidate_count;
163e3db7226Sbellard 
/* byte offset of 'addr' within its target page */
#define SUBPAGE_IDX(addr) ((addr) & ~TARGET_PAGE_MASK)
/* Dispatch state for a physical page split into sub-page I/O regions:
   per byte offset within the page, handler tables indexed by an
   access-size slot (same 4-entry layout as io_mem_read/io_mem_write).
   NOTE(review): opaque[...][2][4] — presumably [read/write][size];
   confirm against the subpage registration code. */
typedef struct subpage_t {
    target_phys_addr_t base;
    CPUReadMemoryFunc **mem_read[TARGET_PAGE_SIZE][4];
    CPUWriteMemoryFunc **mem_write[TARGET_PAGE_SIZE][4];
    void *opaque[TARGET_PAGE_SIZE][2][4];
} subpage_t;
171db7b5426Sblueswir1 
172b346ff46Sbellard static void page_init(void)
17354936004Sbellard {
17483fb7adfSbellard     /* NOTE: we can always suppose that qemu_host_page_size >=
17554936004Sbellard        TARGET_PAGE_SIZE */
17667b915a5Sbellard #ifdef _WIN32
177d5a8f07cSbellard     {
178d5a8f07cSbellard         SYSTEM_INFO system_info;
179d5a8f07cSbellard         DWORD old_protect;
180d5a8f07cSbellard 
181d5a8f07cSbellard         GetSystemInfo(&system_info);
182d5a8f07cSbellard         qemu_real_host_page_size = system_info.dwPageSize;
183d5a8f07cSbellard 
184d5a8f07cSbellard         VirtualProtect(code_gen_buffer, sizeof(code_gen_buffer),
185d5a8f07cSbellard                        PAGE_EXECUTE_READWRITE, &old_protect);
186d5a8f07cSbellard     }
18767b915a5Sbellard #else
18883fb7adfSbellard     qemu_real_host_page_size = getpagesize();
189d5a8f07cSbellard     {
190d5a8f07cSbellard         unsigned long start, end;
191d5a8f07cSbellard 
192d5a8f07cSbellard         start = (unsigned long)code_gen_buffer;
193d5a8f07cSbellard         start &= ~(qemu_real_host_page_size - 1);
194d5a8f07cSbellard 
195d5a8f07cSbellard         end = (unsigned long)code_gen_buffer + sizeof(code_gen_buffer);
196d5a8f07cSbellard         end += qemu_real_host_page_size - 1;
197d5a8f07cSbellard         end &= ~(qemu_real_host_page_size - 1);
198d5a8f07cSbellard 
199d5a8f07cSbellard         mprotect((void *)start, end - start,
200d5a8f07cSbellard                  PROT_READ | PROT_WRITE | PROT_EXEC);
201d5a8f07cSbellard     }
20267b915a5Sbellard #endif
203d5a8f07cSbellard 
20483fb7adfSbellard     if (qemu_host_page_size == 0)
20583fb7adfSbellard         qemu_host_page_size = qemu_real_host_page_size;
20683fb7adfSbellard     if (qemu_host_page_size < TARGET_PAGE_SIZE)
20783fb7adfSbellard         qemu_host_page_size = TARGET_PAGE_SIZE;
20883fb7adfSbellard     qemu_host_page_bits = 0;
20983fb7adfSbellard     while ((1 << qemu_host_page_bits) < qemu_host_page_size)
21083fb7adfSbellard         qemu_host_page_bits++;
21183fb7adfSbellard     qemu_host_page_mask = ~(qemu_host_page_size - 1);
212108c49b8Sbellard     l1_phys_map = qemu_vmalloc(L1_SIZE * sizeof(void *));
213108c49b8Sbellard     memset(l1_phys_map, 0, L1_SIZE * sizeof(void *));
21450a9569bSbalrog 
21550a9569bSbalrog #if !defined(_WIN32) && defined(CONFIG_USER_ONLY)
21650a9569bSbalrog     {
21750a9569bSbalrog         long long startaddr, endaddr;
21850a9569bSbalrog         FILE *f;
21950a9569bSbalrog         int n;
22050a9569bSbalrog 
22150a9569bSbalrog         f = fopen("/proc/self/maps", "r");
22250a9569bSbalrog         if (f) {
22350a9569bSbalrog             do {
22450a9569bSbalrog                 n = fscanf (f, "%llx-%llx %*[^\n]\n", &startaddr, &endaddr);
22550a9569bSbalrog                 if (n == 2) {
22650a9569bSbalrog                     page_set_flags(TARGET_PAGE_ALIGN(startaddr),
22750a9569bSbalrog                                    TARGET_PAGE_ALIGN(endaddr),
22850a9569bSbalrog                                    PAGE_RESERVED);
22950a9569bSbalrog                 }
23050a9569bSbalrog             } while (!feof(f));
23150a9569bSbalrog             fclose(f);
23250a9569bSbalrog         }
23350a9569bSbalrog     }
23450a9569bSbalrog #endif
23554936004Sbellard }
23654936004Sbellard 
237fd6ce8f6Sbellard static inline PageDesc *page_find_alloc(unsigned int index)
23854936004Sbellard {
23954936004Sbellard     PageDesc **lp, *p;
24054936004Sbellard 
24154936004Sbellard     lp = &l1_map[index >> L2_BITS];
24254936004Sbellard     p = *lp;
24354936004Sbellard     if (!p) {
24454936004Sbellard         /* allocate if not found */
24559817ccbSbellard         p = qemu_malloc(sizeof(PageDesc) * L2_SIZE);
246fd6ce8f6Sbellard         memset(p, 0, sizeof(PageDesc) * L2_SIZE);
24754936004Sbellard         *lp = p;
24854936004Sbellard     }
24954936004Sbellard     return p + (index & (L2_SIZE - 1));
25054936004Sbellard }
25154936004Sbellard 
252fd6ce8f6Sbellard static inline PageDesc *page_find(unsigned int index)
25354936004Sbellard {
25454936004Sbellard     PageDesc *p;
25554936004Sbellard 
25654936004Sbellard     p = l1_map[index >> L2_BITS];
25754936004Sbellard     if (!p)
25854936004Sbellard         return 0;
259fd6ce8f6Sbellard     return p + (index & (L2_SIZE - 1));
26054936004Sbellard }
26154936004Sbellard 
/* Find the PhysPageDesc for physical page number 'index'
   (physical address >> TARGET_PAGE_BITS).  With alloc == 0 this is a
   pure lookup returning NULL when no descriptor exists yet; with
   alloc != 0 missing intermediate tables and the leaf L2 array are
   allocated on demand (leaf entries start as IO_MEM_UNASSIGNED). */
static PhysPageDesc *phys_page_find_alloc(target_phys_addr_t index, int alloc)
{
    void **lp, **p;
    PhysPageDesc *pd;

    p = (void **)l1_phys_map;
#if TARGET_PHYS_ADDR_SPACE_BITS > 32

#if TARGET_PHYS_ADDR_SPACE_BITS > (32 + L1_BITS)
#error unsupported TARGET_PHYS_ADDR_SPACE_BITS
#endif
    /* physical addresses wider than 32 bits: walk one extra level */
    lp = p + ((index >> (L1_BITS + L2_BITS)) & (L1_SIZE - 1));
    p = *lp;
    if (!p) {
        /* allocate if not found */
        if (!alloc)
            return NULL;
        p = qemu_vmalloc(sizeof(void *) * L1_SIZE);
        memset(p, 0, sizeof(void *) * L1_SIZE);
        *lp = p;
    }
#endif
    lp = p + ((index >> L2_BITS) & (L1_SIZE - 1));
    pd = *lp;
    if (!pd) {
        int i;
        /* allocate if not found */
        if (!alloc)
            return NULL;
        pd = qemu_vmalloc(sizeof(PhysPageDesc) * L2_SIZE);
        *lp = pd;
        /* every new leaf entry starts out unassigned */
        for (i = 0; i < L2_SIZE; i++)
          pd[i].phys_offset = IO_MEM_UNASSIGNED;
    }
    return ((PhysPageDesc *)pd) + (index & (L2_SIZE - 1));
}
29892e873b9Sbellard 
/* Lookup-only variant of phys_page_find_alloc(): returns NULL instead
   of allocating when 'index' has no descriptor yet. */
static inline PhysPageDesc *phys_page_find(target_phys_addr_t index)
{
    return phys_page_find_alloc(index, 0);
}
30392e873b9Sbellard 
3049fa3e853Sbellard #if !defined(CONFIG_USER_ONLY)
3056a00d601Sbellard static void tlb_protect_code(ram_addr_t ram_addr);
3063a7d929eSbellard static void tlb_unprotect_code_phys(CPUState *env, ram_addr_t ram_addr,
3073a7d929eSbellard                                     target_ulong vaddr);
3089fa3e853Sbellard #endif
309fd6ce8f6Sbellard 
3106a00d601Sbellard void cpu_exec_init(CPUState *env)
311fd6ce8f6Sbellard {
3126a00d601Sbellard     CPUState **penv;
3136a00d601Sbellard     int cpu_index;
3146a00d601Sbellard 
315fd6ce8f6Sbellard     if (!code_gen_ptr) {
31657fec1feSbellard         cpu_gen_init();
317fd6ce8f6Sbellard         code_gen_ptr = code_gen_buffer;
318b346ff46Sbellard         page_init();
31933417e70Sbellard         io_mem_init();
320fd6ce8f6Sbellard     }
3216a00d601Sbellard     env->next_cpu = NULL;
3226a00d601Sbellard     penv = &first_cpu;
3236a00d601Sbellard     cpu_index = 0;
3246a00d601Sbellard     while (*penv != NULL) {
3256a00d601Sbellard         penv = (CPUState **)&(*penv)->next_cpu;
3266a00d601Sbellard         cpu_index++;
3276a00d601Sbellard     }
3286a00d601Sbellard     env->cpu_index = cpu_index;
3296658ffb8Spbrook     env->nb_watchpoints = 0;
3306a00d601Sbellard     *penv = env;
331fd6ce8f6Sbellard }
332fd6ce8f6Sbellard 
3339fa3e853Sbellard static inline void invalidate_page_bitmap(PageDesc *p)
3349fa3e853Sbellard {
3359fa3e853Sbellard     if (p->code_bitmap) {
33659817ccbSbellard         qemu_free(p->code_bitmap);
3379fa3e853Sbellard         p->code_bitmap = NULL;
3389fa3e853Sbellard     }
3399fa3e853Sbellard     p->code_write_count = 0;
3409fa3e853Sbellard }
3419fa3e853Sbellard 
342fd6ce8f6Sbellard /* set to NULL all the 'first_tb' fields in all PageDescs */
343fd6ce8f6Sbellard static void page_flush_tb(void)
344fd6ce8f6Sbellard {
345fd6ce8f6Sbellard     int i, j;
346fd6ce8f6Sbellard     PageDesc *p;
347fd6ce8f6Sbellard 
348fd6ce8f6Sbellard     for(i = 0; i < L1_SIZE; i++) {
349fd6ce8f6Sbellard         p = l1_map[i];
350fd6ce8f6Sbellard         if (p) {
3519fa3e853Sbellard             for(j = 0; j < L2_SIZE; j++) {
3529fa3e853Sbellard                 p->first_tb = NULL;
3539fa3e853Sbellard                 invalidate_page_bitmap(p);
3549fa3e853Sbellard                 p++;
3559fa3e853Sbellard             }
356fd6ce8f6Sbellard         }
357fd6ce8f6Sbellard     }
358fd6ce8f6Sbellard }
359fd6ce8f6Sbellard 
/* flush all the translation blocks */
/* XXX: tb_flush is currently not thread safe */
void tb_flush(CPUState *env1)
{
    CPUState *env;
#if defined(DEBUG_FLUSH)
    printf("qemu: flush code_size=%ld nb_tbs=%d avg_tb_size=%ld\n",
           (unsigned long)(code_gen_ptr - code_gen_buffer),
           nb_tbs, nb_tbs > 0 ?
           ((unsigned long)(code_gen_ptr - code_gen_buffer)) / nb_tbs : 0);
#endif
    /* the generator must never have run past the static buffer */
    if ((unsigned long)(code_gen_ptr - code_gen_buffer) > CODE_GEN_BUFFER_SIZE)
        cpu_abort(env1, "Internal error: code buffer overflow\n");

    nb_tbs = 0;

    /* drop stale TB pointers from every CPU's jump cache */
    for(env = first_cpu; env != NULL; env = env->next_cpu) {
        memset (env->tb_jmp_cache, 0, TB_JMP_CACHE_SIZE * sizeof (void *));
    }

    /* empty the physical hash table and the per-page TB lists */
    memset (tb_phys_hash, 0, CODE_GEN_PHYS_HASH_SIZE * sizeof (void *));
    page_flush_tb();

    /* the whole buffer can now be reused for new translations */
    code_gen_ptr = code_gen_buffer;
    /* XXX: flush processor icache at this point if cache flush is
       expensive */
    tb_flush_count++;
}
388fd6ce8f6Sbellard 
389fd6ce8f6Sbellard #ifdef DEBUG_TB_CHECK
390fd6ce8f6Sbellard 
/* Debug check: report any TB still registered in the physical hash
   table that intersects the page containing 'address' (such TBs
   should have been invalidated already). */
static void tb_invalidate_check(target_ulong address)
{
    TranslationBlock *tb;
    int i;
    address &= TARGET_PAGE_MASK;
    for(i = 0;i < CODE_GEN_PHYS_HASH_SIZE; i++) {
        for(tb = tb_phys_hash[i]; tb != NULL; tb = tb->phys_hash_next) {
            /* overlap test: TB range [pc, pc+size) vs the page */
            if (!(address + TARGET_PAGE_SIZE <= tb->pc ||
                  address >= tb->pc + tb->size)) {
                /* NOTE(review): %08lx assumes target_ulong fits in an
                   unsigned long; debug-only code, verify on 64-bit
                   targets with 32-bit hosts */
                printf("ERROR invalidate: address=%08lx PC=%08lx size=%04x\n",
                       address, (long)tb->pc, tb->size);
            }
        }
    }
}
406fd6ce8f6Sbellard 
/* verify that all the pages have correct rights for code */
static void tb_page_check(void)
{
    TranslationBlock *tb;
    int i, flags1, flags2;

    for(i = 0;i < CODE_GEN_PHYS_HASH_SIZE; i++) {
        for(tb = tb_phys_hash[i]; tb != NULL; tb = tb->phys_hash_next) {
            /* check both ends of the TB: it may span two pages, and a
               page holding translated code must not be writable or
               self-modifying code would go undetected */
            flags1 = page_get_flags(tb->pc);
            flags2 = page_get_flags(tb->pc + tb->size - 1);
            if ((flags1 & PAGE_WRITE) || (flags2 & PAGE_WRITE)) {
                printf("ERROR page flags: PC=%08lx size=%04x f1=%x f2=%x\n",
                       (long)tb->pc, tb->size, flags1, flags2);
            }
        }
    }
}
424fd6ce8f6Sbellard 
/* Debug check: walk the circular jump list starting at tb->jmp_first.
   The low 2 bits of every list pointer encode the jump slot; the
   value 2 marks the list head.  Report if the walk does not terminate
   back at 'tb' itself. */
void tb_jmp_check(TranslationBlock *tb)
{
    TranslationBlock *tb1;
    unsigned int n1;

    /* suppress any remaining jumps to this TB */
    tb1 = tb->jmp_first;
    for(;;) {
        n1 = (long)tb1 & 3;
        tb1 = (TranslationBlock *)((long)tb1 & ~3);
        if (n1 == 2)
            break;
        tb1 = tb1->jmp_next[n1];
    }
    /* check end of list */
    if (tb1 != tb) {
        printf("ERROR: jmp_list from 0x%08lx\n", (long)tb);
    }
}
444d4e8164fSbellard 
445fd6ce8f6Sbellard #endif
446fd6ce8f6Sbellard 
447fd6ce8f6Sbellard /* invalidate one TB */
448fd6ce8f6Sbellard static inline void tb_remove(TranslationBlock **ptb, TranslationBlock *tb,
449fd6ce8f6Sbellard                              int next_offset)
450fd6ce8f6Sbellard {
451fd6ce8f6Sbellard     TranslationBlock *tb1;
452fd6ce8f6Sbellard     for(;;) {
453fd6ce8f6Sbellard         tb1 = *ptb;
454fd6ce8f6Sbellard         if (tb1 == tb) {
455fd6ce8f6Sbellard             *ptb = *(TranslationBlock **)((char *)tb1 + next_offset);
456fd6ce8f6Sbellard             break;
457fd6ce8f6Sbellard         }
458fd6ce8f6Sbellard         ptb = (TranslationBlock **)((char *)tb1 + next_offset);
459fd6ce8f6Sbellard     }
460fd6ce8f6Sbellard }
461fd6ce8f6Sbellard 
4629fa3e853Sbellard static inline void tb_page_remove(TranslationBlock **ptb, TranslationBlock *tb)
4639fa3e853Sbellard {
4649fa3e853Sbellard     TranslationBlock *tb1;
4659fa3e853Sbellard     unsigned int n1;
4669fa3e853Sbellard 
4679fa3e853Sbellard     for(;;) {
4689fa3e853Sbellard         tb1 = *ptb;
4699fa3e853Sbellard         n1 = (long)tb1 & 3;
4709fa3e853Sbellard         tb1 = (TranslationBlock *)((long)tb1 & ~3);
4719fa3e853Sbellard         if (tb1 == tb) {
4729fa3e853Sbellard             *ptb = tb1->page_next[n1];
4739fa3e853Sbellard             break;
4749fa3e853Sbellard         }
4759fa3e853Sbellard         ptb = &tb1->page_next[n1];
4769fa3e853Sbellard     }
4779fa3e853Sbellard }
4789fa3e853Sbellard 
/* Remove jump slot 'n' of 'tb' from the circular list of TBs that
   jump to the same target.  List pointers carry the slot number in
   their low 2 bits; the value 2 marks the list head. */
static inline void tb_jmp_remove(TranslationBlock *tb, int n)
{
    TranslationBlock *tb1, **ptb;
    unsigned int n1;

    ptb = &tb->jmp_next[n];
    tb1 = *ptb;
    if (tb1) {
        /* find tb(n) in circular list */
        for(;;) {
            tb1 = *ptb;
            n1 = (long)tb1 & 3;
            tb1 = (TranslationBlock *)((long)tb1 & ~3);
            if (n1 == n && tb1 == tb)
                break;
            if (n1 == 2) {
                /* head entry: continue through jmp_first */
                ptb = &tb1->jmp_first;
            } else {
                ptb = &tb1->jmp_next[n1];
            }
        }
        /* now we can suppress tb(n) from the list */
        *ptb = tb->jmp_next[n];

        tb->jmp_next[n] = NULL;
    }
}
506d4e8164fSbellard 
/* reset the jump entry 'n' of a TB so that it is not chained to
   another TB */
static inline void tb_reset_jump(TranslationBlock *tb, int n)
{
    /* re-point the jump at the TB's own code following the jump site,
       i.e. the not-chained fallback path */
    tb_set_jmp_target(tb, n, (unsigned long)(tb->tc_ptr + tb->tb_next_offset[n]));
}
513d4e8164fSbellard 
/* Remove 'tb' from every structure that references it: the physical
   hash table, the per-page TB lists, every CPU's jump cache and the
   jump chains of other TBs.  'page_addr' is the page whose list the
   caller is already iterating; removal is skipped for it. */
static inline void tb_phys_invalidate(TranslationBlock *tb, unsigned int page_addr)
{
    CPUState *env;
    PageDesc *p;
    unsigned int h, n1;
    target_ulong phys_pc;
    TranslationBlock *tb1, *tb2;

    /* remove the TB from the hash list */
    phys_pc = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
    h = tb_phys_hash_func(phys_pc);
    tb_remove(&tb_phys_hash[h], tb,
              offsetof(TranslationBlock, phys_hash_next));

    /* remove the TB from the page list */
    if (tb->page_addr[0] != page_addr) {
        p = page_find(tb->page_addr[0] >> TARGET_PAGE_BITS);
        tb_page_remove(&p->first_tb, tb);
        invalidate_page_bitmap(p);
    }
    /* a TB may span two pages; handle the second one too */
    if (tb->page_addr[1] != -1 && tb->page_addr[1] != page_addr) {
        p = page_find(tb->page_addr[1] >> TARGET_PAGE_BITS);
        tb_page_remove(&p->first_tb, tb);
        invalidate_page_bitmap(p);
    }

    tb_invalidated_flag = 1;

    /* remove the TB from the hash list */
    h = tb_jmp_cache_hash_func(tb->pc);
    for(env = first_cpu; env != NULL; env = env->next_cpu) {
        if (env->tb_jmp_cache[h] == tb)
            env->tb_jmp_cache[h] = NULL;
    }

    /* suppress this TB from the two jump lists */
    tb_jmp_remove(tb, 0);
    tb_jmp_remove(tb, 1);

    /* suppress any remaining jumps to this TB */
    tb1 = tb->jmp_first;
    for(;;) {
        n1 = (long)tb1 & 3;
        if (n1 == 2)
            break;
        tb1 = (TranslationBlock *)((long)tb1 & ~3);
        tb2 = tb1->jmp_next[n1];
        /* unchain tb1's jump so it falls back to the TB-lookup path */
        tb_reset_jump(tb1, n1);
        tb1->jmp_next[n1] = NULL;
        tb1 = tb2;
    }
    tb->jmp_first = (TranslationBlock *)((long)tb | 2); /* fail safe */

    tb_phys_invalidate_count++;
}
5699fa3e853Sbellard 
/* Set bits [start, start + len) in the bit array 'tab'
   (bit i lives in tab[i >> 3] at position i & 7). */
static inline void set_bits(uint8_t *tab, int start, int len)
{
    int end, first, last, i, lo_mask, hi_mask;

    if (len <= 0)
        return;
    end = start + len;
    first = start >> 3;            /* index of first byte touched */
    last = (end - 1) >> 3;         /* index of last byte touched */
    lo_mask = 0xff << (start & 7); /* bits >= start within a byte */
    /* bits < end within a byte; a byte-aligned end means "all bits" */
    hi_mask = (end & 7) ? (~(0xff << (end & 7)) & 0xff) : 0xff;
    if (first == last) {
        /* whole range lives in one byte */
        tab[first] |= lo_mask & hi_mask;
    } else {
        tab[first] |= lo_mask;
        for (i = first + 1; i < last; i++)
            tab[i] = 0xff;
        tab[last] |= hi_mask;
    }
}
5969fa3e853Sbellard 
/* Build the self-modifying-code bitmap for page 'p': one bit per byte
   of the page, set for every byte covered by translated code (used
   with code_write_count to optimize SMC detection). */
static void build_page_bitmap(PageDesc *p)
{
    int n, tb_start, tb_end;
    TranslationBlock *tb;

    p->code_bitmap = qemu_malloc(TARGET_PAGE_SIZE / 8);
    if (!p->code_bitmap)
        return;
    memset(p->code_bitmap, 0, TARGET_PAGE_SIZE / 8);

    tb = p->first_tb;
    while (tb != NULL) {
        /* the low 2 bits of the list pointer say which of the TB's
           two page slots this list entry belongs to */
        n = (long)tb & 3;
        tb = (TranslationBlock *)((long)tb & ~3);
        /* NOTE: this is subtle as a TB may span two physical pages */
        if (n == 0) {
            /* NOTE: tb_end may be after the end of the page, but
               it is not a problem */
            tb_start = tb->pc & ~TARGET_PAGE_MASK;
            tb_end = tb_start + tb->size;
            if (tb_end > TARGET_PAGE_SIZE)
                tb_end = TARGET_PAGE_SIZE;
        } else {
            /* second page of a spanning TB: it covers [0, tail) */
            tb_start = 0;
            tb_end = ((tb->pc + tb->size) & ~TARGET_PAGE_MASK);
        }
        set_bits(p->code_bitmap, tb_start, tb_end - tb_start);
        tb = tb->page_next[n];
    }
}
6279fa3e853Sbellard 
628d720b93dSbellard #ifdef TARGET_HAS_PRECISE_SMC
629d720b93dSbellard 
/* Translate one block starting at guest 'pc' and register it.
   'cs_base' and 'flags' identify the CPU translation context and
   'cflags' carries compile flags for the code generator.  If the TB
   pool is exhausted, everything is flushed and translation retried. */
static void tb_gen_code(CPUState *env,
                        target_ulong pc, target_ulong cs_base, int flags,
                        int cflags)
{
    TranslationBlock *tb;
    uint8_t *tc_ptr;
    target_ulong phys_pc, phys_page2, virt_page2;
    int code_gen_size;

    phys_pc = get_phys_addr_code(env, pc);
    tb = tb_alloc(pc);
    if (!tb) {
        /* flush must be done */
        tb_flush(env);
        /* cannot fail at this point */
        tb = tb_alloc(pc);
    }
    tc_ptr = code_gen_ptr;
    tb->tc_ptr = tc_ptr;
    tb->cs_base = cs_base;
    tb->flags = flags;
    tb->cflags = cflags;
    cpu_gen_code(env, tb, &code_gen_size);
    /* bump the generation pointer, rounded up to CODE_GEN_ALIGN */
    code_gen_ptr = (void *)(((unsigned long)code_gen_ptr + code_gen_size + CODE_GEN_ALIGN - 1) & ~(CODE_GEN_ALIGN - 1));

    /* check next page if needed */
    virt_page2 = (pc + tb->size - 1) & TARGET_PAGE_MASK;
    phys_page2 = -1;
    if ((pc & TARGET_PAGE_MASK) != virt_page2) {
        /* the TB spans two pages: register it on both */
        phys_page2 = get_phys_addr_code(env, virt_page2);
    }
    tb_link_phys(tb, phys_pc, phys_page2);
}
663d720b93dSbellard #endif
664d720b93dSbellard 
/* invalidate all TBs which intersect with the target physical page
   starting in range [start;end[. NOTE: start and end must refer to
   the same physical page. 'is_cpu_write_access' should be true if called
   from a real cpu write access: the virtual CPU will exit the current
   TB if code is modified inside this TB. */
void tb_invalidate_phys_page_range(target_ulong start, target_ulong end,
                                   int is_cpu_write_access)
{
    int n, current_tb_modified, current_tb_not_found, current_flags;
    CPUState *env = cpu_single_env;
    PageDesc *p;
    TranslationBlock *tb, *tb_next, *current_tb, *saved_tb;
    target_ulong tb_start, tb_end;
    target_ulong current_pc, current_cs_base;

    p = page_find(start >> TARGET_PAGE_BITS);
    if (!p)
        return;
    /* once a page sees enough code writes, build a per-byte bitmap of
       translated code so future writes can use the fast path */
    if (!p->code_bitmap &&
        ++p->code_write_count >= SMC_BITMAP_USE_THRESHOLD &&
        is_cpu_write_access) {
        /* build code bitmap */
        build_page_bitmap(p);
    }

    /* we remove all the TBs in the range [start, end[ */
    /* XXX: see if in some cases it could be faster to invalidate all the code */
    current_tb_not_found = is_cpu_write_access;
    current_tb_modified = 0;
    current_tb = NULL; /* avoid warning */
    current_pc = 0; /* avoid warning */
    current_cs_base = 0; /* avoid warning */
    current_flags = 0; /* avoid warning */
    tb = p->first_tb;
    while (tb != NULL) {
        /* the low 2 bits of each page-list pointer encode which of the
           TB's (up to) two pages this list entry belongs to */
        n = (long)tb & 3;
        tb = (TranslationBlock *)((long)tb & ~3);
        tb_next = tb->page_next[n];
        /* NOTE: this is subtle as a TB may span two physical pages */
        if (n == 0) {
            /* NOTE: tb_end may be after the end of the page, but
               it is not a problem */
            tb_start = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
            tb_end = tb_start + tb->size;
        } else {
            tb_start = tb->page_addr[1];
            tb_end = tb_start + ((tb->pc + tb->size) & ~TARGET_PAGE_MASK);
        }
        /* only touch TBs that actually overlap [start, end[ */
        if (!(tb_end <= start || tb_start >= end)) {
#ifdef TARGET_HAS_PRECISE_SMC
            /* lazily look up the TB containing the faulting write PC,
               the first time an overlapping TB is found */
            if (current_tb_not_found) {
                current_tb_not_found = 0;
                current_tb = NULL;
                if (env->mem_write_pc) {
                    /* now we have a real cpu fault */
                    current_tb = tb_find_pc(env->mem_write_pc);
                }
            }
            if (current_tb == tb &&
                !(current_tb->cflags & CF_SINGLE_INSN)) {
                /* If we are modifying the current TB, we must stop
                its execution. We could be more precise by checking
                that the modification is after the current PC, but it
                would require a specialized function to partially
                restore the CPU state */

                current_tb_modified = 1;
                cpu_restore_state(current_tb, env,
                                  env->mem_write_pc, NULL);
#if defined(TARGET_I386)
                current_flags = env->hflags;
                current_flags |= (env->eflags & (IOPL_MASK | TF_MASK | VM_MASK));
                current_cs_base = (target_ulong)env->segs[R_CS].base;
                current_pc = current_cs_base + env->eip;
#else
#error unsupported CPU
#endif
            }
#endif /* TARGET_HAS_PRECISE_SMC */
            /* we need to do that to handle the case where a signal
               occurs while doing tb_phys_invalidate() */
            saved_tb = NULL;
            if (env) {
                saved_tb = env->current_tb;
                env->current_tb = NULL;
            }
            tb_phys_invalidate(tb, -1);
            if (env) {
                env->current_tb = saved_tb;
                /* re-deliver any interrupt raised while current_tb was
                   cleared */
                if (env->interrupt_request && env->current_tb)
                    cpu_interrupt(env, env->interrupt_request);
            }
        }
        tb = tb_next;
    }
#if !defined(CONFIG_USER_ONLY)
    /* if no code remaining, no need to continue to use slow writes */
    if (!p->first_tb) {
        invalidate_page_bitmap(p);
        if (is_cpu_write_access) {
            tlb_unprotect_code_phys(env, start, env->mem_write_vaddr);
        }
    }
#endif
#ifdef TARGET_HAS_PRECISE_SMC
    if (current_tb_modified) {
        /* we generate a block containing just the instruction
           modifying the memory. It will ensure that it cannot modify
           itself */
        env->current_tb = NULL;
        tb_gen_code(env, current_pc, current_cs_base, current_flags,
                    CF_SINGLE_INSN);
        cpu_resume_from_signal(env, NULL);
    }
#endif
}
7819fa3e853Sbellard 
7829fa3e853Sbellard /* len must be <= 8 and start must be a multiple of len */
783d720b93dSbellard static inline void tb_invalidate_phys_page_fast(target_ulong start, int len)
7849fa3e853Sbellard {
7859fa3e853Sbellard     PageDesc *p;
7869fa3e853Sbellard     int offset, b;
78759817ccbSbellard #if 0
788a4193c8aSbellard     if (1) {
789a4193c8aSbellard         if (loglevel) {
790a4193c8aSbellard             fprintf(logfile, "modifying code at 0x%x size=%d EIP=%x PC=%08x\n",
791a4193c8aSbellard                    cpu_single_env->mem_write_vaddr, len,
792a4193c8aSbellard                    cpu_single_env->eip,
793a4193c8aSbellard                    cpu_single_env->eip + (long)cpu_single_env->segs[R_CS].base);
794a4193c8aSbellard         }
79559817ccbSbellard     }
79659817ccbSbellard #endif
7979fa3e853Sbellard     p = page_find(start >> TARGET_PAGE_BITS);
7989fa3e853Sbellard     if (!p)
7999fa3e853Sbellard         return;
8009fa3e853Sbellard     if (p->code_bitmap) {
8019fa3e853Sbellard         offset = start & ~TARGET_PAGE_MASK;
8029fa3e853Sbellard         b = p->code_bitmap[offset >> 3] >> (offset & 7);
8039fa3e853Sbellard         if (b & ((1 << len) - 1))
8049fa3e853Sbellard             goto do_invalidate;
8059fa3e853Sbellard     } else {
8069fa3e853Sbellard     do_invalidate:
807d720b93dSbellard         tb_invalidate_phys_page_range(start, start + len, 1);
8089fa3e853Sbellard     }
8099fa3e853Sbellard }
8109fa3e853Sbellard 
8119fa3e853Sbellard #if !defined(CONFIG_SOFTMMU)
/* Invalidate every TB on the (target) page containing 'addr'
   (user-mode / no-softmmu variant: the whole page is invalidated at
   once).  'pc' and 'puc' describe the faulting write context when
   called from a signal handler; they are used by precise SMC support
   to restart the modifying instruction. */
static void tb_invalidate_phys_page(target_ulong addr,
                                    unsigned long pc, void *puc)
{
    int n, current_flags, current_tb_modified;
    target_ulong current_pc, current_cs_base;
    PageDesc *p;
    TranslationBlock *tb, *current_tb;
#ifdef TARGET_HAS_PRECISE_SMC
    CPUState *env = cpu_single_env;
#endif

    addr &= TARGET_PAGE_MASK;
    p = page_find(addr >> TARGET_PAGE_BITS);
    if (!p)
        return;
    tb = p->first_tb;
    current_tb_modified = 0;
    current_tb = NULL;
    current_pc = 0; /* avoid warning */
    current_cs_base = 0; /* avoid warning */
    current_flags = 0; /* avoid warning */
#ifdef TARGET_HAS_PRECISE_SMC
    /* pc == 0 means no precise fault information is available */
    if (tb && pc != 0) {
        current_tb = tb_find_pc(pc);
    }
#endif
    while (tb != NULL) {
        /* low 2 bits of the list pointer select which of the TB's
           (up to) two pages this list entry is for */
        n = (long)tb & 3;
        tb = (TranslationBlock *)((long)tb & ~3);
#ifdef TARGET_HAS_PRECISE_SMC
        if (current_tb == tb &&
            !(current_tb->cflags & CF_SINGLE_INSN)) {
                /* If we are modifying the current TB, we must stop
                   its execution. We could be more precise by checking
                   that the modification is after the current PC, but it
                   would require a specialized function to partially
                   restore the CPU state */

            current_tb_modified = 1;
            cpu_restore_state(current_tb, env, pc, puc);
#if defined(TARGET_I386)
            current_flags = env->hflags;
            current_flags |= (env->eflags & (IOPL_MASK | TF_MASK | VM_MASK));
            current_cs_base = (target_ulong)env->segs[R_CS].base;
            current_pc = current_cs_base + env->eip;
#else
#error unsupported CPU
#endif
        }
#endif /* TARGET_HAS_PRECISE_SMC */
        tb_phys_invalidate(tb, addr);
        tb = tb->page_next[n];
    }
    p->first_tb = NULL;
#ifdef TARGET_HAS_PRECISE_SMC
    if (current_tb_modified) {
        /* we generate a block containing just the instruction
           modifying the memory. It will ensure that it cannot modify
           itself */
        env->current_tb = NULL;
        tb_gen_code(env, current_pc, current_cs_base, current_flags,
                    CF_SINGLE_INSN);
        cpu_resume_from_signal(env, puc);
    }
#endif
}
8789fa3e853Sbellard #endif
879fd6ce8f6Sbellard 
/* add the tb in the target page and protect it if necessary */
static inline void tb_alloc_page(TranslationBlock *tb,
                                 unsigned int n, target_ulong page_addr)
{
    PageDesc *p;
    TranslationBlock *last_first_tb;

    tb->page_addr[n] = page_addr;
    p = page_find_alloc(page_addr >> TARGET_PAGE_BITS);
    tb->page_next[n] = p->first_tb;
    /* remember whether the page already held translated code before
       this insertion */
    last_first_tb = p->first_tb;
    /* the page slot index is stored in the low bits of the list
       pointer */
    p->first_tb = (TranslationBlock *)((long)tb | n);
    invalidate_page_bitmap(p);

#if defined(TARGET_HAS_SMC) || 1

#if defined(CONFIG_USER_ONLY)
    if (p->flags & PAGE_WRITE) {
        target_ulong addr;
        PageDesc *p2;
        int prot;

        /* force the host page as non writable (writes will have a
           page fault + mprotect overhead) */
        page_addr &= qemu_host_page_mask;
        prot = 0;
        /* a host page may cover several target pages: accumulate the
           protection bits of all of them and clear PAGE_WRITE on each */
        for(addr = page_addr; addr < page_addr + qemu_host_page_size;
            addr += TARGET_PAGE_SIZE) {

            p2 = page_find (addr >> TARGET_PAGE_BITS);
            if (!p2)
                continue;
            prot |= p2->flags;
            p2->flags &= ~PAGE_WRITE;
            page_get_flags(addr);
          }
        mprotect(g2h(page_addr), qemu_host_page_size,
                 (prot & PAGE_BITS) & ~PAGE_WRITE);
#ifdef DEBUG_TB_INVALIDATE
        printf("protecting code page: 0x" TARGET_FMT_lx "\n",
               page_addr);
#endif
    }
#else
    /* if some code is already present, then the pages are already
       protected. So we handle the case where only the first TB is
       allocated in a physical page */
    if (!last_first_tb) {
        tlb_protect_code(page_addr);
    }
#endif

#endif /* TARGET_HAS_SMC */
}
934fd6ce8f6Sbellard 
935fd6ce8f6Sbellard /* Allocate a new translation block. Flush the translation buffer if
936fd6ce8f6Sbellard    too many translation blocks or too much generated code. */
937c27004ecSbellard TranslationBlock *tb_alloc(target_ulong pc)
938fd6ce8f6Sbellard {
939fd6ce8f6Sbellard     TranslationBlock *tb;
940fd6ce8f6Sbellard 
941fd6ce8f6Sbellard     if (nb_tbs >= CODE_GEN_MAX_BLOCKS ||
942fd6ce8f6Sbellard         (code_gen_ptr - code_gen_buffer) >= CODE_GEN_BUFFER_MAX_SIZE)
943d4e8164fSbellard         return NULL;
944fd6ce8f6Sbellard     tb = &tbs[nb_tbs++];
945fd6ce8f6Sbellard     tb->pc = pc;
946b448f2f3Sbellard     tb->cflags = 0;
947d4e8164fSbellard     return tb;
948d4e8164fSbellard }
949d4e8164fSbellard 
9509fa3e853Sbellard /* add a new TB and link it to the physical page tables. phys_page2 is
9519fa3e853Sbellard    (-1) to indicate that only one page contains the TB. */
9529fa3e853Sbellard void tb_link_phys(TranslationBlock *tb,
9539fa3e853Sbellard                   target_ulong phys_pc, target_ulong phys_page2)
954d4e8164fSbellard {
9559fa3e853Sbellard     unsigned int h;
9569fa3e853Sbellard     TranslationBlock **ptb;
9579fa3e853Sbellard 
9589fa3e853Sbellard     /* add in the physical hash table */
9599fa3e853Sbellard     h = tb_phys_hash_func(phys_pc);
9609fa3e853Sbellard     ptb = &tb_phys_hash[h];
9619fa3e853Sbellard     tb->phys_hash_next = *ptb;
9629fa3e853Sbellard     *ptb = tb;
963fd6ce8f6Sbellard 
964fd6ce8f6Sbellard     /* add in the page list */
9659fa3e853Sbellard     tb_alloc_page(tb, 0, phys_pc & TARGET_PAGE_MASK);
9669fa3e853Sbellard     if (phys_page2 != -1)
9679fa3e853Sbellard         tb_alloc_page(tb, 1, phys_page2);
9689fa3e853Sbellard     else
9699fa3e853Sbellard         tb->page_addr[1] = -1;
9709fa3e853Sbellard 
971d4e8164fSbellard     tb->jmp_first = (TranslationBlock *)((long)tb | 2);
972d4e8164fSbellard     tb->jmp_next[0] = NULL;
973d4e8164fSbellard     tb->jmp_next[1] = NULL;
974d4e8164fSbellard 
975d4e8164fSbellard     /* init original jump addresses */
976d4e8164fSbellard     if (tb->tb_next_offset[0] != 0xffff)
977d4e8164fSbellard         tb_reset_jump(tb, 0);
978d4e8164fSbellard     if (tb->tb_next_offset[1] != 0xffff)
979d4e8164fSbellard         tb_reset_jump(tb, 1);
9808a40a180Sbellard 
9818a40a180Sbellard #ifdef DEBUG_TB_CHECK
9828a40a180Sbellard     tb_page_check();
9838a40a180Sbellard #endif
984fd6ce8f6Sbellard }
985fd6ce8f6Sbellard 
986a513fe19Sbellard /* find the TB 'tb' such that tb[0].tc_ptr <= tc_ptr <
987a513fe19Sbellard    tb[1].tc_ptr. Return NULL if not found */
988a513fe19Sbellard TranslationBlock *tb_find_pc(unsigned long tc_ptr)
989a513fe19Sbellard {
990a513fe19Sbellard     int m_min, m_max, m;
991a513fe19Sbellard     unsigned long v;
992a513fe19Sbellard     TranslationBlock *tb;
993a513fe19Sbellard 
994a513fe19Sbellard     if (nb_tbs <= 0)
995a513fe19Sbellard         return NULL;
996a513fe19Sbellard     if (tc_ptr < (unsigned long)code_gen_buffer ||
997a513fe19Sbellard         tc_ptr >= (unsigned long)code_gen_ptr)
998a513fe19Sbellard         return NULL;
999a513fe19Sbellard     /* binary search (cf Knuth) */
1000a513fe19Sbellard     m_min = 0;
1001a513fe19Sbellard     m_max = nb_tbs - 1;
1002a513fe19Sbellard     while (m_min <= m_max) {
1003a513fe19Sbellard         m = (m_min + m_max) >> 1;
1004a513fe19Sbellard         tb = &tbs[m];
1005a513fe19Sbellard         v = (unsigned long)tb->tc_ptr;
1006a513fe19Sbellard         if (v == tc_ptr)
1007a513fe19Sbellard             return tb;
1008a513fe19Sbellard         else if (tc_ptr < v) {
1009a513fe19Sbellard             m_max = m - 1;
1010a513fe19Sbellard         } else {
1011a513fe19Sbellard             m_min = m + 1;
1012a513fe19Sbellard         }
1013a513fe19Sbellard     }
1014a513fe19Sbellard     return &tbs[m_max];
1015a513fe19Sbellard }
10167501267eSbellard 
1017ea041c0eSbellard static void tb_reset_jump_recursive(TranslationBlock *tb);
1018ea041c0eSbellard 
/* Unchain the n-th outgoing jump of 'tb'.  If tb jumps to some tb_next,
   tb is removed from tb_next's incoming-jump list, the direct jump in
   the generated code is reset, and tb_next's own jumps are unchained
   recursively. */
static inline void tb_reset_jump_recursive2(TranslationBlock *tb, int n)
{
    TranslationBlock *tb1, *tb_next, **ptb;
    unsigned int n1;

    tb1 = tb->jmp_next[n];
    if (tb1 != NULL) {
        /* find head of list */
        /* jmp list pointers carry a slot tag in their low 2 bits;
           tag 2 marks the owning TB (the head of the circular list) */
        for(;;) {
            n1 = (long)tb1 & 3;
            tb1 = (TranslationBlock *)((long)tb1 & ~3);
            if (n1 == 2)
                break;
            tb1 = tb1->jmp_next[n1];
        }
        /* we are now sure now that tb jumps to tb1 */
        tb_next = tb1;

        /* remove tb from the jmp_first list */
        ptb = &tb_next->jmp_first;
        for(;;) {
            tb1 = *ptb;
            n1 = (long)tb1 & 3;
            tb1 = (TranslationBlock *)((long)tb1 & ~3);
            if (n1 == n && tb1 == tb)
                break;
            ptb = &tb1->jmp_next[n1];
        }
        *ptb = tb->jmp_next[n];
        tb->jmp_next[n] = NULL;

        /* suppress the jump to next tb in generated code */
        tb_reset_jump(tb, n);

        /* suppress jumps in the tb on which we could have jumped */
        tb_reset_jump_recursive(tb_next);
    }
}
1057ea041c0eSbellard 
1058ea041c0eSbellard static void tb_reset_jump_recursive(TranslationBlock *tb)
1059ea041c0eSbellard {
1060ea041c0eSbellard     tb_reset_jump_recursive2(tb, 0);
1061ea041c0eSbellard     tb_reset_jump_recursive2(tb, 1);
1062ea041c0eSbellard }
1063ea041c0eSbellard 
10641fddef4bSbellard #if defined(TARGET_HAS_ICE)
1065d720b93dSbellard static void breakpoint_invalidate(CPUState *env, target_ulong pc)
1066d720b93dSbellard {
10679b3c35e0Sj_mayer     target_phys_addr_t addr;
10689b3c35e0Sj_mayer     target_ulong pd;
1069c2f07f81Spbrook     ram_addr_t ram_addr;
1070c2f07f81Spbrook     PhysPageDesc *p;
1071d720b93dSbellard 
1072c2f07f81Spbrook     addr = cpu_get_phys_page_debug(env, pc);
1073c2f07f81Spbrook     p = phys_page_find(addr >> TARGET_PAGE_BITS);
1074c2f07f81Spbrook     if (!p) {
1075c2f07f81Spbrook         pd = IO_MEM_UNASSIGNED;
1076c2f07f81Spbrook     } else {
1077c2f07f81Spbrook         pd = p->phys_offset;
1078c2f07f81Spbrook     }
1079c2f07f81Spbrook     ram_addr = (pd & TARGET_PAGE_MASK) | (pc & ~TARGET_PAGE_MASK);
1080706cd4b5Spbrook     tb_invalidate_phys_page_range(ram_addr, ram_addr + 1, 0);
1081d720b93dSbellard }
1082c27004ecSbellard #endif
1083d720b93dSbellard 
10846658ffb8Spbrook /* Add a watchpoint.  */
10856658ffb8Spbrook int  cpu_watchpoint_insert(CPUState *env, target_ulong addr)
10866658ffb8Spbrook {
10876658ffb8Spbrook     int i;
10886658ffb8Spbrook 
10896658ffb8Spbrook     for (i = 0; i < env->nb_watchpoints; i++) {
10906658ffb8Spbrook         if (addr == env->watchpoint[i].vaddr)
10916658ffb8Spbrook             return 0;
10926658ffb8Spbrook     }
10936658ffb8Spbrook     if (env->nb_watchpoints >= MAX_WATCHPOINTS)
10946658ffb8Spbrook         return -1;
10956658ffb8Spbrook 
10966658ffb8Spbrook     i = env->nb_watchpoints++;
10976658ffb8Spbrook     env->watchpoint[i].vaddr = addr;
10986658ffb8Spbrook     tlb_flush_page(env, addr);
10996658ffb8Spbrook     /* FIXME: This flush is needed because of the hack to make memory ops
11006658ffb8Spbrook        terminate the TB.  It can be removed once the proper IO trap and
11016658ffb8Spbrook        re-execute bits are in.  */
11026658ffb8Spbrook     tb_flush(env);
11036658ffb8Spbrook     return i;
11046658ffb8Spbrook }
11056658ffb8Spbrook 
11066658ffb8Spbrook /* Remove a watchpoint.  */
11076658ffb8Spbrook int cpu_watchpoint_remove(CPUState *env, target_ulong addr)
11086658ffb8Spbrook {
11096658ffb8Spbrook     int i;
11106658ffb8Spbrook 
11116658ffb8Spbrook     for (i = 0; i < env->nb_watchpoints; i++) {
11126658ffb8Spbrook         if (addr == env->watchpoint[i].vaddr) {
11136658ffb8Spbrook             env->nb_watchpoints--;
11146658ffb8Spbrook             env->watchpoint[i] = env->watchpoint[env->nb_watchpoints];
11156658ffb8Spbrook             tlb_flush_page(env, addr);
11166658ffb8Spbrook             return 0;
11176658ffb8Spbrook         }
11186658ffb8Spbrook     }
11196658ffb8Spbrook     return -1;
11206658ffb8Spbrook }
11216658ffb8Spbrook 
1122c33a346eSbellard /* add a breakpoint. EXCP_DEBUG is returned by the CPU loop if a
1123c33a346eSbellard    breakpoint is reached */
11242e12669aSbellard int cpu_breakpoint_insert(CPUState *env, target_ulong pc)
11254c3a88a2Sbellard {
11261fddef4bSbellard #if defined(TARGET_HAS_ICE)
11274c3a88a2Sbellard     int i;
11284c3a88a2Sbellard 
11294c3a88a2Sbellard     for(i = 0; i < env->nb_breakpoints; i++) {
11304c3a88a2Sbellard         if (env->breakpoints[i] == pc)
11314c3a88a2Sbellard             return 0;
11324c3a88a2Sbellard     }
11334c3a88a2Sbellard 
11344c3a88a2Sbellard     if (env->nb_breakpoints >= MAX_BREAKPOINTS)
11354c3a88a2Sbellard         return -1;
11364c3a88a2Sbellard     env->breakpoints[env->nb_breakpoints++] = pc;
1137d720b93dSbellard 
1138d720b93dSbellard     breakpoint_invalidate(env, pc);
11394c3a88a2Sbellard     return 0;
11404c3a88a2Sbellard #else
11414c3a88a2Sbellard     return -1;
11424c3a88a2Sbellard #endif
11434c3a88a2Sbellard }
11444c3a88a2Sbellard 
11454c3a88a2Sbellard /* remove a breakpoint */
11462e12669aSbellard int cpu_breakpoint_remove(CPUState *env, target_ulong pc)
11474c3a88a2Sbellard {
11481fddef4bSbellard #if defined(TARGET_HAS_ICE)
11494c3a88a2Sbellard     int i;
11504c3a88a2Sbellard     for(i = 0; i < env->nb_breakpoints; i++) {
11514c3a88a2Sbellard         if (env->breakpoints[i] == pc)
11524c3a88a2Sbellard             goto found;
11534c3a88a2Sbellard     }
11544c3a88a2Sbellard     return -1;
11554c3a88a2Sbellard  found:
11564c3a88a2Sbellard     env->nb_breakpoints--;
11571fddef4bSbellard     if (i < env->nb_breakpoints)
11581fddef4bSbellard       env->breakpoints[i] = env->breakpoints[env->nb_breakpoints];
1159d720b93dSbellard 
1160d720b93dSbellard     breakpoint_invalidate(env, pc);
11614c3a88a2Sbellard     return 0;
11624c3a88a2Sbellard #else
11634c3a88a2Sbellard     return -1;
11644c3a88a2Sbellard #endif
11654c3a88a2Sbellard }
11664c3a88a2Sbellard 
1167c33a346eSbellard /* enable or disable single step mode. EXCP_DEBUG is returned by the
1168c33a346eSbellard    CPU loop after each instruction */
1169c33a346eSbellard void cpu_single_step(CPUState *env, int enabled)
1170c33a346eSbellard {
11711fddef4bSbellard #if defined(TARGET_HAS_ICE)
1172c33a346eSbellard     if (env->singlestep_enabled != enabled) {
1173c33a346eSbellard         env->singlestep_enabled = enabled;
1174c33a346eSbellard         /* must flush all the translated code to avoid inconsistancies */
11759fa3e853Sbellard         /* XXX: only flush what is necessary */
11760124311eSbellard         tb_flush(env);
1177c33a346eSbellard     }
1178c33a346eSbellard #endif
1179c33a346eSbellard }
1180c33a346eSbellard 
/* enable or disable low levels log */
void cpu_set_log(int log_flags)
{
    loglevel = log_flags;
    if (loglevel && !logfile) {
        /* the very first open honours the current log_append setting;
           log_append is then forced below so later re-opens append */
        logfile = fopen(logfilename, log_append ? "a" : "w");
        if (!logfile) {
            perror(logfilename);
            _exit(1);
        }
#if !defined(CONFIG_SOFTMMU)
        /* must avoid mmap() usage of glibc by setting a buffer "by hand" */
        {
            static uint8_t logfile_buf[4096];
            setvbuf(logfile, logfile_buf, _IOLBF, sizeof(logfile_buf));
        }
#else
        /* line-buffered so log output appears promptly */
        setvbuf(logfile, NULL, _IOLBF, 0);
#endif
        log_append = 1;
    }
    /* logging fully disabled: release the file */
    if (!loglevel && logfile) {
        fclose(logfile);
        logfile = NULL;
    }
}
120734865134Sbellard 
/* Change the log file path; any open log file is closed and will be
   re-opened (in append mode) under the new name by cpu_set_log(). */
void cpu_set_log_filename(const char *filename)
{
    /* NOTE(review): a previously strdup'd name is leaked here and the
       strdup result is unchecked -- presumably acceptable since this
       runs rarely (option parsing); confirm. */
    logfilename = strdup(filename);
    if (logfile) {
        fclose(logfile);
        logfile = NULL;
    }
    cpu_set_log(loglevel);
}
1217c33a346eSbellard 
/* mask must never be zero, except for A20 change call */
void cpu_interrupt(CPUState *env, int mask)
{
    TranslationBlock *tb;
    static spinlock_t interrupt_lock = SPIN_LOCK_UNLOCKED;

    env->interrupt_request |= mask;
    /* if the cpu is currently executing code, we must unlink it and
       all the potentially executing TB */
    tb = env->current_tb;
    /* if the lock is already held, another context is doing the unlink;
       skipping it here is safe */
    if (tb && !testandset(&interrupt_lock)) {
        env->current_tb = NULL;
        /* break the chained jumps so the execution loop regains control
           at the end of the current TB */
        tb_reset_jump_recursive(tb);
        resetlock(&interrupt_lock);
    }
}
1234ea041c0eSbellard 
/* Clear the given pending interrupt bits in env->interrupt_request. */
void cpu_reset_interrupt(CPUState *env, int mask)
{
    env->interrupt_request &= ~mask;
}
1239b54ad049Sbellard 
/* Table mapping log mask bits to the names and help text used by the
   log-selection option; terminated by a zero-mask entry (relied upon
   by cpu_str_to_log_mask). */
CPULogItem cpu_log_items[] = {
    { CPU_LOG_TB_OUT_ASM, "out_asm",
      "show generated host assembly code for each compiled TB" },
    { CPU_LOG_TB_IN_ASM, "in_asm",
      "show target assembly code for each compiled TB" },
    { CPU_LOG_TB_OP, "op",
      "show micro ops for each compiled TB" },
    { CPU_LOG_TB_OP_OPT, "op_opt",
      "show micro ops "
#ifdef TARGET_I386
      "before eflags optimization and "
#endif
      "after liveness analysis" },
    { CPU_LOG_INT, "int",
      "show interrupts/exceptions in short format" },
    { CPU_LOG_EXEC, "exec",
      "show trace before each executed TB (lots of logs)" },
    { CPU_LOG_TB_CPU, "cpu",
      "show CPU state before block translation" },
#ifdef TARGET_I386
    { CPU_LOG_PCALL, "pcall",
      "show protected mode far calls/returns/exceptions" },
#endif
#ifdef DEBUG_IOPORT
    { CPU_LOG_IOPORT, "ioport",
      "show all i/o ports accesses" },
#endif
    { 0, NULL, NULL },
};
1269f193c797Sbellard 
/* Return nonzero iff the first n characters of s1 are exactly the
   string s2 (i.e. s2 has length n and matches byte for byte). */
static int cmp1(const char *s1, int n, const char *s2)
{
    return strlen(s2) == (size_t)n && memcmp(s1, s2, n) == 0;
}
1276f193c797Sbellard 
1277f193c797Sbellard /* takes a comma separated list of log masks. Return 0 if error. */
1278f193c797Sbellard int cpu_str_to_log_mask(const char *str)
1279f193c797Sbellard {
1280f193c797Sbellard     CPULogItem *item;
1281f193c797Sbellard     int mask;
1282f193c797Sbellard     const char *p, *p1;
1283f193c797Sbellard 
1284f193c797Sbellard     p = str;
1285f193c797Sbellard     mask = 0;
1286f193c797Sbellard     for(;;) {
1287f193c797Sbellard         p1 = strchr(p, ',');
1288f193c797Sbellard         if (!p1)
1289f193c797Sbellard             p1 = p + strlen(p);
12908e3a9fd2Sbellard 	if(cmp1(p,p1-p,"all")) {
12918e3a9fd2Sbellard 		for(item = cpu_log_items; item->mask != 0; item++) {
12928e3a9fd2Sbellard 			mask |= item->mask;
12938e3a9fd2Sbellard 		}
12948e3a9fd2Sbellard 	} else {
1295f193c797Sbellard         for(item = cpu_log_items; item->mask != 0; item++) {
1296f193c797Sbellard             if (cmp1(p, p1 - p, item->name))
1297f193c797Sbellard                 goto found;
1298f193c797Sbellard         }
1299f193c797Sbellard         return 0;
13008e3a9fd2Sbellard 	}
1301f193c797Sbellard     found:
1302f193c797Sbellard         mask |= item->mask;
1303f193c797Sbellard         if (*p1 != ',')
1304f193c797Sbellard             break;
1305f193c797Sbellard         p = p1 + 1;
1306f193c797Sbellard     }
1307f193c797Sbellard     return mask;
1308f193c797Sbellard }
1309ea041c0eSbellard 
/* Report a fatal emulator error: print the message and the CPU state
   to stderr (and to the log file, if open), then abort().  Never
   returns. */
void cpu_abort(CPUState *env, const char *fmt, ...)
{
    va_list ap;
    va_list ap2;

    va_start(ap, fmt);
    /* the argument list is consumed twice (stderr and logfile), so a
       copy is required */
    va_copy(ap2, ap);
    fprintf(stderr, "qemu: fatal: ");
    vfprintf(stderr, fmt, ap);
    fprintf(stderr, "\n");
#ifdef TARGET_I386
    if(env->intercept & INTERCEPT_SVM_MASK) {
	/* most probably the virtual machine should not
	   be shut down but rather caught by the VMM */
        vmexit(SVM_EXIT_SHUTDOWN, 0);
    }
    cpu_dump_state(env, stderr, fprintf, X86_DUMP_FPU | X86_DUMP_CCOP);
#else
    cpu_dump_state(env, stderr, fprintf, 0);
#endif
    if (logfile) {
        fprintf(logfile, "qemu: fatal: ");
        vfprintf(logfile, fmt, ap2);
        fprintf(logfile, "\n");
#ifdef TARGET_I386
        cpu_dump_state(env, logfile, fprintf, X86_DUMP_FPU | X86_DUMP_CCOP);
#else
        cpu_dump_state(env, logfile, fprintf, 0);
#endif
        fflush(logfile);
        fclose(logfile);
    }
    va_end(ap2);
    va_end(ap);
    abort();
}
13467501267eSbellard 
1347c5be9f08Sths CPUState *cpu_copy(CPUState *env)
1348c5be9f08Sths {
134901ba9816Sths     CPUState *new_env = cpu_init(env->cpu_model_str);
1350c5be9f08Sths     /* preserve chaining and index */
1351c5be9f08Sths     CPUState *next_cpu = new_env->next_cpu;
1352c5be9f08Sths     int cpu_index = new_env->cpu_index;
1353c5be9f08Sths     memcpy(new_env, env, sizeof(CPUState));
1354c5be9f08Sths     new_env->next_cpu = next_cpu;
1355c5be9f08Sths     new_env->cpu_index = cpu_index;
1356c5be9f08Sths     return new_env;
1357c5be9f08Sths }
1358c5be9f08Sths 
13590124311eSbellard #if !defined(CONFIG_USER_ONLY)
13600124311eSbellard 
/* NOTE: if flush_global is true, also flush global entries (not
   implemented yet) */
/* Invalidate the whole software TLB (all MMU modes) and the TB jump
   cache of 'env'. */
void tlb_flush(CPUState *env, int flush_global)
{
    int i;

#if defined(DEBUG_TLB)
    printf("tlb_flush:\n");
#endif
    /* must reset current TB so that interrupts cannot modify the
       links while we are modifying them */
    env->current_tb = NULL;

    /* invalidate every entry by setting all three comparators to -1,
       which can never match a page-aligned lookup address */
    for(i = 0; i < CPU_TLB_SIZE; i++) {
        env->tlb_table[0][i].addr_read = -1;
        env->tlb_table[0][i].addr_write = -1;
        env->tlb_table[0][i].addr_code = -1;
        env->tlb_table[1][i].addr_read = -1;
        env->tlb_table[1][i].addr_write = -1;
        env->tlb_table[1][i].addr_code = -1;
#if (NB_MMU_MODES >= 3)
        env->tlb_table[2][i].addr_read = -1;
        env->tlb_table[2][i].addr_write = -1;
        env->tlb_table[2][i].addr_code = -1;
#if (NB_MMU_MODES == 4)
        env->tlb_table[3][i].addr_read = -1;
        env->tlb_table[3][i].addr_write = -1;
        env->tlb_table[3][i].addr_code = -1;
#endif
#endif
    }

    /* the TB jump cache is keyed by virtual address, so it must be
       cleared together with the TLB */
    memset (env->tb_jmp_cache, 0, TB_JMP_CACHE_SIZE * sizeof (void *));

#if !defined(CONFIG_SOFTMMU)
    munmap((void *)MMAP_AREA_START, MMAP_AREA_END - MMAP_AREA_START);
#endif
#ifdef USE_KQEMU
    if (env->kqemu_enabled) {
        kqemu_flush(env, flush_global);
    }
#endif
    tlb_flush_count++;
}
140533417e70Sbellard 
1406274da6b2Sbellard static inline void tlb_flush_entry(CPUTLBEntry *tlb_entry, target_ulong addr)
140761382a50Sbellard {
140884b7b8e7Sbellard     if (addr == (tlb_entry->addr_read &
140984b7b8e7Sbellard                  (TARGET_PAGE_MASK | TLB_INVALID_MASK)) ||
141084b7b8e7Sbellard         addr == (tlb_entry->addr_write &
141184b7b8e7Sbellard                  (TARGET_PAGE_MASK | TLB_INVALID_MASK)) ||
141284b7b8e7Sbellard         addr == (tlb_entry->addr_code &
141384b7b8e7Sbellard                  (TARGET_PAGE_MASK | TLB_INVALID_MASK))) {
141484b7b8e7Sbellard         tlb_entry->addr_read = -1;
141584b7b8e7Sbellard         tlb_entry->addr_write = -1;
141684b7b8e7Sbellard         tlb_entry->addr_code = -1;
141784b7b8e7Sbellard     }
141861382a50Sbellard }
141961382a50Sbellard 
/* Flush all TLB and jump-cache state associated with a single virtual
   page of 'env'. */
void tlb_flush_page(CPUState *env, target_ulong addr)
{
    int i;
    TranslationBlock *tb;

#if defined(DEBUG_TLB)
    printf("tlb_flush_page: " TARGET_FMT_lx "\n", addr);
#endif
    /* must reset current TB so that interrupts cannot modify the
       links while we are modifying them */
    env->current_tb = NULL;

    addr &= TARGET_PAGE_MASK;
    /* each MMU mode uses the same direct-mapped index, so flush the
       slot this page hashes to in every mode */
    i = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
    tlb_flush_entry(&env->tlb_table[0][i], addr);
    tlb_flush_entry(&env->tlb_table[1][i], addr);
#if (NB_MMU_MODES >= 3)
    tlb_flush_entry(&env->tlb_table[2][i], addr);
#if (NB_MMU_MODES == 4)
    tlb_flush_entry(&env->tlb_table[3][i], addr);
#endif
#endif

    /* Discard jump cache entries for any tb which might potentially
       overlap the flushed page.  */
    i = tb_jmp_cache_hash_page(addr - TARGET_PAGE_SIZE);
    memset (&env->tb_jmp_cache[i], 0, TB_JMP_PAGE_SIZE * sizeof(tb));

    i = tb_jmp_cache_hash_page(addr);
    memset (&env->tb_jmp_cache[i], 0, TB_JMP_PAGE_SIZE * sizeof(tb));

#if !defined(CONFIG_SOFTMMU)
    if (addr < MMAP_AREA_END)
        munmap((void *)addr, TARGET_PAGE_SIZE);
#endif
#ifdef USE_KQEMU
    if (env->kqemu_enabled) {
        kqemu_flush_page(env, addr);
    }
#endif
}
14619fa3e853Sbellard 
/* update the TLBs so that writes to code in the virtual page 'addr'
   can be detected */
static void tlb_protect_code(ram_addr_t ram_addr)
{
    /* clearing CODE_DIRTY_FLAG for the page forces subsequent writes
       through the not-dirty slow path, where self-modifying code is
       detected */
    cpu_physical_memory_reset_dirty(ram_addr,
                                    ram_addr + TARGET_PAGE_SIZE,
                                    CODE_DIRTY_FLAG);
}
14709fa3e853Sbellard 
/* update the TLB so that writes in physical page 'phys_addr' are no longer
   tested for self modifying code */
static void tlb_unprotect_code_phys(CPUState *env, ram_addr_t ram_addr,
                                    target_ulong vaddr)
{
    /* setting CODE_DIRTY_FLAG lets writes to this page take the fast
       path again; env and vaddr are unused here */
    phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] |= CODE_DIRTY_FLAG;
}
14789fa3e853Sbellard 
14791ccde1cbSbellard static inline void tlb_reset_dirty_range(CPUTLBEntry *tlb_entry,
14801ccde1cbSbellard                                          unsigned long start, unsigned long length)
14811ccde1cbSbellard {
14821ccde1cbSbellard     unsigned long addr;
148384b7b8e7Sbellard     if ((tlb_entry->addr_write & ~TARGET_PAGE_MASK) == IO_MEM_RAM) {
148484b7b8e7Sbellard         addr = (tlb_entry->addr_write & TARGET_PAGE_MASK) + tlb_entry->addend;
14851ccde1cbSbellard         if ((addr - start) < length) {
148684b7b8e7Sbellard             tlb_entry->addr_write = (tlb_entry->addr_write & TARGET_PAGE_MASK) | IO_MEM_NOTDIRTY;
14871ccde1cbSbellard         }
14881ccde1cbSbellard     }
14891ccde1cbSbellard }
14901ccde1cbSbellard 
/* Clear the given dirty-flag bits for the physical RAM range
   [start, end) and patch every CPU's TLB so that the next write to a
   page in the range takes the not-dirty slow path (which sets the
   dirty bits again). */
void cpu_physical_memory_reset_dirty(ram_addr_t start, ram_addr_t end,
                                     int dirty_flags)
{
    CPUState *env;
    unsigned long length, start1;
    int i, mask, len;
    uint8_t *p;

    start &= TARGET_PAGE_MASK;
    end = TARGET_PAGE_ALIGN(end);

    length = end - start;
    if (length == 0)
        return;
    len = length >> TARGET_PAGE_BITS;
#ifdef USE_KQEMU
    /* XXX: should not depend on cpu context */
    env = first_cpu;
    if (env->kqemu_enabled) {
        ram_addr_t addr;
        addr = start;
        for(i = 0; i < len; i++) {
            kqemu_set_notdirty(env, addr);
            addr += TARGET_PAGE_SIZE;
        }
    }
#endif
    /* clear the requested flag bits in the per-page dirty bitmap */
    mask = ~dirty_flags;
    p = phys_ram_dirty + (start >> TARGET_PAGE_BITS);
    for(i = 0; i < len; i++)
        p[i] &= mask;

    /* we modify the TLB cache so that the dirty bit will be set again
       when accessing the range */
    /* TLB entries hold host addresses, so translate the ram offset */
    start1 = start + (unsigned long)phys_ram_base;
    for(env = first_cpu; env != NULL; env = env->next_cpu) {
        for(i = 0; i < CPU_TLB_SIZE; i++)
            tlb_reset_dirty_range(&env->tlb_table[0][i], start1, length);
        for(i = 0; i < CPU_TLB_SIZE; i++)
            tlb_reset_dirty_range(&env->tlb_table[1][i], start1, length);
#if (NB_MMU_MODES >= 3)
        for(i = 0; i < CPU_TLB_SIZE; i++)
            tlb_reset_dirty_range(&env->tlb_table[2][i], start1, length);
#if (NB_MMU_MODES == 4)
        for(i = 0; i < CPU_TLB_SIZE; i++)
            tlb_reset_dirty_range(&env->tlb_table[3][i], start1, length);
#endif
#endif
    }

#if !defined(CONFIG_SOFTMMU)
    /* XXX: this is expensive */
    /* without the soft MMU, writable host mappings of pages in the
       range must be write-protected so writes fault and are noticed */
    {
        VirtPageDesc *p;
        int j;
        target_ulong addr;

        for(i = 0; i < L1_SIZE; i++) {
            p = l1_virt_map[i];
            if (p) {
                addr = i << (TARGET_PAGE_BITS + L2_BITS);
                for(j = 0; j < L2_SIZE; j++) {
                    if (p->valid_tag == virt_valid_tag &&
                        p->phys_addr >= start && p->phys_addr < end &&
                        (p->prot & PROT_WRITE)) {
                        if (addr < MMAP_AREA_END) {
                            mprotect((void *)addr, TARGET_PAGE_SIZE,
                                     p->prot & ~PROT_WRITE);
                        }
                    }
                    addr += TARGET_PAGE_SIZE;
                    p++;
                }
            }
        }
    }
#endif
}
15691ccde1cbSbellard 
15703a7d929eSbellard static inline void tlb_update_dirty(CPUTLBEntry *tlb_entry)
15713a7d929eSbellard {
15723a7d929eSbellard     ram_addr_t ram_addr;
15733a7d929eSbellard 
157484b7b8e7Sbellard     if ((tlb_entry->addr_write & ~TARGET_PAGE_MASK) == IO_MEM_RAM) {
157584b7b8e7Sbellard         ram_addr = (tlb_entry->addr_write & TARGET_PAGE_MASK) +
15763a7d929eSbellard             tlb_entry->addend - (unsigned long)phys_ram_base;
15773a7d929eSbellard         if (!cpu_physical_memory_is_dirty(ram_addr)) {
157884b7b8e7Sbellard             tlb_entry->addr_write |= IO_MEM_NOTDIRTY;
15793a7d929eSbellard         }
15803a7d929eSbellard     }
15813a7d929eSbellard }
15823a7d929eSbellard 
/* update the TLB according to the current state of the dirty bits */
void cpu_tlb_update_dirty(CPUState *env)
{
    int i;
    /* re-check every write entry of every MMU mode against the dirty
       bitmap */
    for(i = 0; i < CPU_TLB_SIZE; i++)
        tlb_update_dirty(&env->tlb_table[0][i]);
    for(i = 0; i < CPU_TLB_SIZE; i++)
        tlb_update_dirty(&env->tlb_table[1][i]);
#if (NB_MMU_MODES >= 3)
    for(i = 0; i < CPU_TLB_SIZE; i++)
        tlb_update_dirty(&env->tlb_table[2][i]);
#if (NB_MMU_MODES == 4)
    for(i = 0; i < CPU_TLB_SIZE; i++)
        tlb_update_dirty(&env->tlb_table[3][i]);
#endif
#endif
}
16003a7d929eSbellard 
16011ccde1cbSbellard static inline void tlb_set_dirty1(CPUTLBEntry *tlb_entry,
16021ccde1cbSbellard                                   unsigned long start)
16031ccde1cbSbellard {
16041ccde1cbSbellard     unsigned long addr;
160584b7b8e7Sbellard     if ((tlb_entry->addr_write & ~TARGET_PAGE_MASK) == IO_MEM_NOTDIRTY) {
160684b7b8e7Sbellard         addr = (tlb_entry->addr_write & TARGET_PAGE_MASK) + tlb_entry->addend;
16071ccde1cbSbellard         if (addr == start) {
160884b7b8e7Sbellard             tlb_entry->addr_write = (tlb_entry->addr_write & TARGET_PAGE_MASK) | IO_MEM_RAM;
16091ccde1cbSbellard         }
16101ccde1cbSbellard     }
16111ccde1cbSbellard }
16121ccde1cbSbellard 
/* update the TLB corresponding to virtual page vaddr and phys addr
   addr so that it is no longer dirty */
static inline void tlb_set_dirty(CPUState *env,
                                 unsigned long addr, target_ulong vaddr)
{
    int i;

    addr &= TARGET_PAGE_MASK;
    /* the entry can only live in the slot the virtual address hashes
       to, in any of the MMU modes */
    i = (vaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
    tlb_set_dirty1(&env->tlb_table[0][i], addr);
    tlb_set_dirty1(&env->tlb_table[1][i], addr);
#if (NB_MMU_MODES >= 3)
    tlb_set_dirty1(&env->tlb_table[2][i], addr);
#if (NB_MMU_MODES == 4)
    tlb_set_dirty1(&env->tlb_table[3][i], addr);
#endif
#endif
}
16311ccde1cbSbellard 
/* add a new TLB entry. At most one entry for a given virtual address
   is permitted. Return 0 if OK or 2 if the page could not be mapped
   (can only happen in non SOFTMMU mode for I/O pages or pages
   conflicting with the host address space). */
int tlb_set_page_exec(CPUState *env, target_ulong vaddr,
                      target_phys_addr_t paddr, int prot,
                      int mmu_idx, int is_softmmu)
{
    PhysPageDesc *p;
    unsigned long pd;
    unsigned int index;
    target_ulong address;
    target_phys_addr_t addend;
    int ret;
    CPUTLBEntry *te;
    int i;

    /* pd combines the page's physical offset with its I/O type bits;
       an unmapped page is treated as unassigned I/O */
    p = phys_page_find(paddr >> TARGET_PAGE_BITS);
    if (!p) {
        pd = IO_MEM_UNASSIGNED;
    } else {
        pd = p->phys_offset;
    }
#if defined(DEBUG_TLB)
    printf("tlb_set_page: vaddr=" TARGET_FMT_lx " paddr=0x%08x prot=%x idx=%d smmu=%d pd=0x%08lx\n",
           vaddr, (int)paddr, prot, mmu_idx, is_softmmu, pd);
#endif

    ret = 0;
#if !defined(CONFIG_SOFTMMU)
    if (is_softmmu)
#endif
    {
        if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM && !(pd & IO_MEM_ROMD)) {
            /* IO memory case */
            /* the I/O index is stored in the low bits of the entry's
               address so accesses are routed to the handlers */
            address = vaddr | pd;
            addend = paddr;
        } else {
            /* standard memory */
            /* addend will translate a guest virtual address into a
               host pointer once vaddr is subtracted below */
            address = vaddr;
            addend = (unsigned long)phys_ram_base + (pd & TARGET_PAGE_MASK);
        }

        /* Make accesses to pages with watchpoints go via the
           watchpoint trap routines.  */
        for (i = 0; i < env->nb_watchpoints; i++) {
            if (vaddr == (env->watchpoint[i].vaddr & TARGET_PAGE_MASK)) {
                if (address & ~TARGET_PAGE_MASK) {
                    env->watchpoint[i].addend = 0;
                    address = vaddr | io_mem_watch;
                } else {
                    env->watchpoint[i].addend = pd - paddr +
                        (unsigned long) phys_ram_base;
                    /* TODO: Figure out how to make read watchpoints coexist
                       with code.  */
                    pd = (pd & TARGET_PAGE_MASK) | io_mem_watch | IO_MEM_ROMD;
                }
            }
        }

        /* fill the direct-mapped TLB slot for this virtual page */
        index = (vaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
        addend -= vaddr;
        te = &env->tlb_table[mmu_idx][index];
        te->addend = addend;
        if (prot & PAGE_READ) {
            te->addr_read = address;
        } else {
            te->addr_read = -1;
        }
        if (prot & PAGE_EXEC) {
            te->addr_code = address;
        } else {
            te->addr_code = -1;
        }
        if (prot & PAGE_WRITE) {
            if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_ROM ||
                (pd & IO_MEM_ROMD)) {
                /* write access calls the I/O callback */
                te->addr_write = vaddr |
                    (pd & ~(TARGET_PAGE_MASK | IO_MEM_ROMD));
            } else if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_RAM &&
                       !cpu_physical_memory_is_dirty(pd)) {
                /* not-dirty RAM: writes go through the slow path so
                   the dirty bits get updated */
                te->addr_write = vaddr | IO_MEM_NOTDIRTY;
            } else {
                te->addr_write = address;
            }
        } else {
            te->addr_write = -1;
        }
    }
#if !defined(CONFIG_SOFTMMU)
    else {
        /* no soft MMU: map the guest page directly into the host
           address space with mmap() */
        if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM) {
            /* IO access: no mapping is done as it will be handled by the
               soft MMU */
            if (!(env->hflags & HF_SOFTMMU_MASK))
                ret = 2;
        } else {
            void *map_addr;

            if (vaddr >= MMAP_AREA_END) {
                ret = 2;
            } else {
                if (prot & PROT_WRITE) {
                    if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_ROM ||
#if defined(TARGET_HAS_SMC) || 1
                        first_tb ||
#endif
                        ((pd & ~TARGET_PAGE_MASK) == IO_MEM_RAM &&
                         !cpu_physical_memory_is_dirty(pd))) {
                        /* ROM: we do as if code was inside */
                        /* if code is present, we only map as read only and save the
                           original mapping */
                        VirtPageDesc *vp;

                        vp = virt_page_find_alloc(vaddr >> TARGET_PAGE_BITS, 1);
                        vp->phys_addr = pd;
                        vp->prot = prot;
                        vp->valid_tag = virt_valid_tag;
                        prot &= ~PAGE_WRITE;
                    }
                }
                map_addr = mmap((void *)vaddr, TARGET_PAGE_SIZE, prot,
                                MAP_SHARED | MAP_FIXED, phys_ram_fd, (pd & TARGET_PAGE_MASK));
                if (map_addr == MAP_FAILED) {
                    cpu_abort(env, "mmap failed when mapped physical address 0x%08x to virtual address 0x%08x\n",
                              paddr, vaddr);
                }
            }
        }
    }
#endif
    return ret;
}
17669fa3e853Sbellard 
/* called from signal handler: invalidate the code and unprotect the
   page. Return TRUE if the fault was succesfully handled. */
int page_unprotect(target_ulong addr, unsigned long pc, void *puc)
{
#if !defined(CONFIG_SOFTMMU)
    VirtPageDesc *vp;

#if defined(DEBUG_TLB)
    printf("page_unprotect: addr=0x%08x\n", addr);
#endif
    addr &= TARGET_PAGE_MASK;

    /* if it is not mapped, no need to worry here */
    if (addr >= MMAP_AREA_END)
        return 0;
    vp = virt_page_find(addr >> TARGET_PAGE_BITS);
    if (!vp)
        return 0;
    /* NOTE: in this case, validate_tag is _not_ tested as it
       validates only the code TLB */
    if (vp->valid_tag != virt_valid_tag)
        return 0;
    /* only pages that were originally writable can be unprotected */
    if (!(vp->prot & PAGE_WRITE))
        return 0;
#if defined(DEBUG_TLB)
    printf("page_unprotect: addr=0x%08x phys_addr=0x%08x prot=%x\n",
           addr, vp->phys_addr, vp->prot);
#endif
    /* restore the saved protection, which includes write access */
    if (mprotect((void *)addr, TARGET_PAGE_SIZE, vp->prot) < 0)
        cpu_abort(cpu_single_env, "error mprotect addr=0x%lx prot=%d\n",
                  (unsigned long)addr, vp->prot);
    /* set the dirty bit */
    phys_ram_dirty[vp->phys_addr >> TARGET_PAGE_BITS] = 0xff;
    /* flush the code inside */
    tb_invalidate_phys_page(vp->phys_addr, pc, puc);
    return 1;
#else
    return 0;
#endif
}
180733417e70Sbellard 
18080124311eSbellard #else
18090124311eSbellard 
/* user-mode emulation has no software TLB: nothing to flush */
void tlb_flush(CPUState *env, int flush_global)
{
}
18130124311eSbellard 
/* user-mode emulation has no software TLB: nothing to flush */
void tlb_flush_page(CPUState *env, target_ulong addr)
{
}
18170124311eSbellard 
/* user-mode emulation has no software TLB: adding an entry is a
   no-op that always reports success */
int tlb_set_page_exec(CPUState *env, target_ulong vaddr,
                      target_phys_addr_t paddr, int prot,
                      int mmu_idx, int is_softmmu)
{
    return 0;
}
182433417e70Sbellard 
/* dump memory mappings */
/* Walk the two-level page-flag map and print one line per contiguous
   run of pages sharing the same protection, in /proc/maps-like form. */
void page_dump(FILE *f)
{
    unsigned long start, end;
    int i, j, prot, prot1;
    PageDesc *p;

    fprintf(f, "%-8s %-8s %-8s %s\n",
            "start", "end", "size", "prot");
    /* start == -1 means "not currently inside a mapped run" */
    start = -1;
    end = -1;
    prot = 0;
    /* the extra iteration (i == L1_SIZE) forces the final run to be
       flushed at the end of the address space */
    for(i = 0; i <= L1_SIZE; i++) {
        if (i < L1_SIZE)
            p = l1_map[i];
        else
            p = NULL;
        for(j = 0;j < L2_SIZE; j++) {
            if (!p)
                prot1 = 0;
            else
                prot1 = p[j].flags;
            /* a protection change terminates the current run */
            if (prot1 != prot) {
                end = (i << (32 - L1_BITS)) | (j << TARGET_PAGE_BITS);
                if (start != -1) {
                    fprintf(f, "%08lx-%08lx %08lx %c%c%c\n",
                            start, end, end - start,
                            prot & PAGE_READ ? 'r' : '-',
                            prot & PAGE_WRITE ? 'w' : '-',
                            prot & PAGE_EXEC ? 'x' : '-');
                }
                if (prot1 != 0)
                    start = end;
                else
                    start = -1;
                prot = prot1;
            }
            /* an absent L2 table means the rest of this L1 slot is
               unmapped: skip straight to the next slot */
            if (!p)
                break;
        }
    }
}
18679fa3e853Sbellard 
186853a5960aSpbrook int page_get_flags(target_ulong address)
18699fa3e853Sbellard {
18709fa3e853Sbellard     PageDesc *p;
18719fa3e853Sbellard 
18729fa3e853Sbellard     p = page_find(address >> TARGET_PAGE_BITS);
18739fa3e853Sbellard     if (!p)
18749fa3e853Sbellard         return 0;
18759fa3e853Sbellard     return p->flags;
18769fa3e853Sbellard }
18779fa3e853Sbellard 
/* modify the flags of a page and invalidate the code if
   necessary. The flag PAGE_WRITE_ORG is positionned automatically
   depending on PAGE_WRITE */
void page_set_flags(target_ulong start, target_ulong end, int flags)
{
    PageDesc *p;
    target_ulong addr;

    start = start & TARGET_PAGE_MASK;
    end = TARGET_PAGE_ALIGN(end);
    /* remember that the page was originally writable, even if
       PAGE_WRITE is later dropped to catch self-modifying code */
    if (flags & PAGE_WRITE)
        flags |= PAGE_WRITE_ORG;
    /* tb_lock protects the translated-code structures touched by
       tb_invalidate_phys_page */
    spin_lock(&tb_lock);
    for(addr = start; addr < end; addr += TARGET_PAGE_SIZE) {
        p = page_find_alloc(addr >> TARGET_PAGE_BITS);
        /* if the write protection is set, then we invalidate the code
           inside */
        if (!(p->flags & PAGE_WRITE) &&
            (flags & PAGE_WRITE) &&
            p->first_tb) {
            tb_invalidate_phys_page(addr, 0, NULL);
        }
        p->flags = flags;
    }
    spin_unlock(&tb_lock);
}
19049fa3e853Sbellard 
19053d97b40bSths int page_check_range(target_ulong start, target_ulong len, int flags)
19063d97b40bSths {
19073d97b40bSths     PageDesc *p;
19083d97b40bSths     target_ulong end;
19093d97b40bSths     target_ulong addr;
19103d97b40bSths 
19113d97b40bSths     end = TARGET_PAGE_ALIGN(start+len); /* must do before we loose bits in the next step */
19123d97b40bSths     start = start & TARGET_PAGE_MASK;
19133d97b40bSths 
19143d97b40bSths     if( end < start )
19153d97b40bSths         /* we've wrapped around */
19163d97b40bSths         return -1;
19173d97b40bSths     for(addr = start; addr < end; addr += TARGET_PAGE_SIZE) {
19183d97b40bSths         p = page_find(addr >> TARGET_PAGE_BITS);
19193d97b40bSths         if( !p )
19203d97b40bSths             return -1;
19213d97b40bSths         if( !(p->flags & PAGE_VALID) )
19223d97b40bSths             return -1;
19233d97b40bSths 
1924dae3270cSbellard         if ((flags & PAGE_READ) && !(p->flags & PAGE_READ))
19253d97b40bSths             return -1;
1926dae3270cSbellard         if (flags & PAGE_WRITE) {
1927dae3270cSbellard             if (!(p->flags & PAGE_WRITE_ORG))
19283d97b40bSths                 return -1;
1929dae3270cSbellard             /* unprotect the page if it was put read-only because it
1930dae3270cSbellard                contains translated code */
1931dae3270cSbellard             if (!(p->flags & PAGE_WRITE)) {
1932dae3270cSbellard                 if (!page_unprotect(addr, 0, NULL))
1933dae3270cSbellard                     return -1;
1934dae3270cSbellard             }
1935dae3270cSbellard             return 0;
1936dae3270cSbellard         }
19373d97b40bSths     }
19383d97b40bSths     return 0;
19393d97b40bSths }
19403d97b40bSths 
/* called from signal handler: invalidate the code and unprotect the
   page. Return TRUE if the fault was successfully handled. */
int page_unprotect(target_ulong address, unsigned long pc, void *puc)
{
    unsigned int page_index, prot, pindex;
    PageDesc *p, *p1;
    target_ulong host_start, host_end, addr;

    /* A host page may span several target pages: operate on the whole
       host page, since mprotect() works at host-page granularity.  */
    host_start = address & qemu_host_page_mask;
    page_index = host_start >> TARGET_PAGE_BITS;
    p1 = page_find(page_index);
    if (!p1)
        return 0;
    host_end = host_start + qemu_host_page_size;
    p = p1;
    prot = 0;
    /* union of the flags of every target page in this host page */
    for(addr = host_start;addr < host_end; addr += TARGET_PAGE_SIZE) {
        prot |= p->flags;
        p++;
    }
    /* if the page was really writable, then we change its
       protection back to writable */
    if (prot & PAGE_WRITE_ORG) {
        pindex = (address - host_start) >> TARGET_PAGE_BITS;
        if (!(p1[pindex].flags & PAGE_WRITE)) {
            mprotect((void *)g2h(host_start), qemu_host_page_size,
                     (prot & PAGE_BITS) | PAGE_WRITE);
            p1[pindex].flags |= PAGE_WRITE;
            /* and since the content will be modified, we must invalidate
               the corresponding translated code. */
            tb_invalidate_phys_page(address, pc, puc);
#ifdef DEBUG_TB_CHECK
            tb_invalidate_check(address);
#endif
            /* fault handled: the faulting store can now be restarted */
            return 1;
        }
    }
    /* not a write-protected code page: let the caller raise the fault */
    return 0;
}
19809fa3e853Sbellard 
/* No-op in user-mode emulation: there is no softmmu TLB to update.  */
static inline void tlb_set_dirty(CPUState *env,
                                 unsigned long addr, target_ulong vaddr)
{
}
19859fa3e853Sbellard #endif /* defined(CONFIG_USER_ONLY) */
198633417e70Sbellard 
static int subpage_register (subpage_t *mmio, uint32_t start, uint32_t end,
                             int memory);
static void *subpage_init (target_phys_addr_t base, uint32_t *phys,
                           int orig_memory);

/* Compute the span [start_addr2, end_addr2] (byte offsets within the
   target page containing 'addr') covered by a mapping that starts at
   'start_addr', and set need_subpage when the mapping does not cover
   the whole page.  NOTE: the body also reads 'orig_size' from the
   expansion site's scope even though it is not a macro parameter.  */
#define CHECK_SUBPAGE(addr, start_addr, start_addr2, end_addr, end_addr2, \
                      need_subpage)                                     \
    do {                                                                \
        if (addr > start_addr)                                          \
            start_addr2 = 0;                                            \
        else {                                                          \
            start_addr2 = start_addr & ~TARGET_PAGE_MASK;               \
            if (start_addr2 > 0)                                        \
                need_subpage = 1;                                       \
        }                                                               \
                                                                        \
        if ((start_addr + orig_size) - addr >= TARGET_PAGE_SIZE)        \
            end_addr2 = TARGET_PAGE_SIZE - 1;                           \
        else {                                                          \
            end_addr2 = (start_addr + orig_size - 1) & ~TARGET_PAGE_MASK; \
            if (end_addr2 < TARGET_PAGE_SIZE - 1)                       \
                need_subpage = 1;                                       \
        }                                                               \
    } while (0)
2010db7b5426Sblueswir1 
/* register physical memory. 'size' must be a multiple of the target
   page size. If (phys_offset & ~TARGET_PAGE_MASK) != 0, then it is an
   io memory page */
void cpu_register_physical_memory(target_phys_addr_t start_addr,
                                  unsigned long size,
                                  unsigned long phys_offset)
{
    target_phys_addr_t addr, end_addr;
    PhysPageDesc *p;
    CPUState *env;
    unsigned long orig_size = size; /* CHECK_SUBPAGE reads 'orig_size' by name */
    void *subpage;

    size = (size + TARGET_PAGE_SIZE - 1) & TARGET_PAGE_MASK;
    end_addr = start_addr + (target_phys_addr_t)size;
    for(addr = start_addr; addr != end_addr; addr += TARGET_PAGE_SIZE) {
        p = phys_page_find(addr >> TARGET_PAGE_BITS);
        if (p && p->phys_offset != IO_MEM_UNASSIGNED) {
            /* page already mapped: may have to split it into subpages
               when the new region only partially covers it or its io
               handlers do not implement every access width */
            unsigned long orig_memory = p->phys_offset;
            target_phys_addr_t start_addr2, end_addr2;
            int need_subpage = 0;

            CHECK_SUBPAGE(addr, start_addr, start_addr2, end_addr, end_addr2,
                          need_subpage);
            if (need_subpage || phys_offset & IO_MEM_SUBWIDTH) {
                if (!(orig_memory & IO_MEM_SUBPAGE)) {
                    /* first partial mapping on this page: demote the
                       existing full-page mapping to a subpage container */
                    subpage = subpage_init((addr & TARGET_PAGE_MASK),
                                           &p->phys_offset, orig_memory);
                } else {
                    /* page is already a subpage container: reuse it */
                    subpage = io_mem_opaque[(orig_memory & ~TARGET_PAGE_MASK)
                                            >> IO_MEM_SHIFT];
                }
                subpage_register(subpage, start_addr2, end_addr2, phys_offset);
            } else {
                p->phys_offset = phys_offset;
                /* for RAM/ROM(D) pages, phys_offset advances with the
                   address so each page maps the next ram page */
                if ((phys_offset & ~TARGET_PAGE_MASK) <= IO_MEM_ROM ||
                    (phys_offset & IO_MEM_ROMD))
                    phys_offset += TARGET_PAGE_SIZE;
            }
        } else {
            /* page not mapped yet (or unassigned): allocate a descriptor */
            p = phys_page_find_alloc(addr >> TARGET_PAGE_BITS, 1);
            p->phys_offset = phys_offset;
            if ((phys_offset & ~TARGET_PAGE_MASK) <= IO_MEM_ROM ||
                (phys_offset & IO_MEM_ROMD))
                phys_offset += TARGET_PAGE_SIZE;
            else {
                target_phys_addr_t start_addr2, end_addr2;
                int need_subpage = 0;

                CHECK_SUBPAGE(addr, start_addr, start_addr2, end_addr,
                              end_addr2, need_subpage);

                if (need_subpage || phys_offset & IO_MEM_SUBWIDTH) {
                    /* partial io mapping on a fresh page: the rest of
                       the page stays unassigned */
                    subpage = subpage_init((addr & TARGET_PAGE_MASK),
                                           &p->phys_offset, IO_MEM_UNASSIGNED);
                    subpage_register(subpage, start_addr2, end_addr2,
                                     phys_offset);
                }
            }
        }
    }

    /* since each CPU stores ram addresses in its TLB cache, we must
       reset the modified entries */
    /* XXX: slow ! */
    for(env = first_cpu; env != NULL; env = env->next_cpu) {
        tlb_flush(env, 1);
    }
}
208033417e70Sbellard 
2081ba863458Sbellard /* XXX: temporary until new memory mapping API */
2082ba863458Sbellard uint32_t cpu_get_physical_page_desc(target_phys_addr_t addr)
2083ba863458Sbellard {
2084ba863458Sbellard     PhysPageDesc *p;
2085ba863458Sbellard 
2086ba863458Sbellard     p = phys_page_find(addr >> TARGET_PAGE_BITS);
2087ba863458Sbellard     if (!p)
2088ba863458Sbellard         return IO_MEM_UNASSIGNED;
2089ba863458Sbellard     return p->phys_offset;
2090ba863458Sbellard }
2091ba863458Sbellard 
2092e9a1ab19Sbellard /* XXX: better than nothing */
2093e9a1ab19Sbellard ram_addr_t qemu_ram_alloc(unsigned int size)
2094e9a1ab19Sbellard {
2095e9a1ab19Sbellard     ram_addr_t addr;
2096e9a1ab19Sbellard     if ((phys_ram_alloc_offset + size) >= phys_ram_size) {
2097e9a1ab19Sbellard         fprintf(stderr, "Not enough memory (requested_size = %u, max memory = %d)\n",
2098e9a1ab19Sbellard                 size, phys_ram_size);
2099e9a1ab19Sbellard         abort();
2100e9a1ab19Sbellard     }
2101e9a1ab19Sbellard     addr = phys_ram_alloc_offset;
2102e9a1ab19Sbellard     phys_ram_alloc_offset = TARGET_PAGE_ALIGN(phys_ram_alloc_offset + size);
2103e9a1ab19Sbellard     return addr;
2104e9a1ab19Sbellard }
2105e9a1ab19Sbellard 
/* Intentionally a no-op: qemu_ram_alloc() only bumps an offset, so
   there is no allocator state to release; RAM is never reclaimed.  */
void qemu_ram_free(ram_addr_t addr)
{
}
2109e9a1ab19Sbellard 
2110a4193c8aSbellard static uint32_t unassigned_mem_readb(void *opaque, target_phys_addr_t addr)
211133417e70Sbellard {
211267d3b957Spbrook #ifdef DEBUG_UNASSIGNED
2113ab3d1727Sblueswir1     printf("Unassigned mem read " TARGET_FMT_plx "\n", addr);
211467d3b957Spbrook #endif
2115b4f0a316Sblueswir1 #ifdef TARGET_SPARC
21166c36d3faSblueswir1     do_unassigned_access(addr, 0, 0, 0);
2117f1ccf904Sths #elif TARGET_CRIS
2118f1ccf904Sths     do_unassigned_access(addr, 0, 0, 0);
2119b4f0a316Sblueswir1 #endif
212033417e70Sbellard     return 0;
212133417e70Sbellard }
212233417e70Sbellard 
2123a4193c8aSbellard static void unassigned_mem_writeb(void *opaque, target_phys_addr_t addr, uint32_t val)
212433417e70Sbellard {
212567d3b957Spbrook #ifdef DEBUG_UNASSIGNED
2126ab3d1727Sblueswir1     printf("Unassigned mem write " TARGET_FMT_plx " = 0x%x\n", addr, val);
212767d3b957Spbrook #endif
2128b4f0a316Sblueswir1 #ifdef TARGET_SPARC
21296c36d3faSblueswir1     do_unassigned_access(addr, 1, 0, 0);
2130f1ccf904Sths #elif TARGET_CRIS
2131f1ccf904Sths     do_unassigned_access(addr, 1, 0, 0);
2132b4f0a316Sblueswir1 #endif
213333417e70Sbellard }
213433417e70Sbellard 
/* All widths route through the byte handler: unassigned reads always
   return 0, so the access size is irrelevant.  */
static CPUReadMemoryFunc *unassigned_mem_read[3] = {
    unassigned_mem_readb,
    unassigned_mem_readb,
    unassigned_mem_readb,
};
214033417e70Sbellard 
/* All widths route through the byte handler: unassigned writes are
   discarded regardless of access size.  */
static CPUWriteMemoryFunc *unassigned_mem_write[3] = {
    unassigned_mem_writeb,
    unassigned_mem_writeb,
    unassigned_mem_writeb,
};
214633417e70Sbellard 
/* Byte write handler for RAM pages whose dirty bits are not all set:
   invalidate translated code on the page, perform the store, then
   update the dirty bitmap.  */
static void notdirty_mem_writeb(void *opaque, target_phys_addr_t addr, uint32_t val)
{
    unsigned long ram_addr;
    int dirty_flags;
    /* 'addr' is a host address inside the RAM block; recover the ram
       offset to index the dirty bitmap */
    ram_addr = addr - (unsigned long)phys_ram_base;
    dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
    if (!(dirty_flags & CODE_DIRTY_FLAG)) {
        /* page may contain translated code: flush it before the store */
#if !defined(CONFIG_USER_ONLY)
        tb_invalidate_phys_page_fast(ram_addr, 1);
        dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
#endif
    }
    stb_p((uint8_t *)(long)addr, val);
#ifdef USE_KQEMU
    if (cpu_single_env->kqemu_enabled &&
        (dirty_flags & KQEMU_MODIFY_PAGE_MASK) != KQEMU_MODIFY_PAGE_MASK)
        kqemu_modify_page(cpu_single_env, ram_addr);
#endif
    /* mark every dirty bit except CODE_DIRTY_FLAG */
    dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
    phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] = dirty_flags;
    /* we remove the notdirty callback only if the code has been
       flushed */
    if (dirty_flags == 0xff)
        tlb_set_dirty(cpu_single_env, addr, cpu_single_env->mem_write_vaddr);
}
21721ccde1cbSbellard 
/* 16-bit variant of notdirty_mem_writeb: same dirty-tracking sequence
   with a 2-byte invalidation and store.  */
static void notdirty_mem_writew(void *opaque, target_phys_addr_t addr, uint32_t val)
{
    unsigned long ram_addr;
    int dirty_flags;
    /* convert host address back to a ram offset for the dirty bitmap */
    ram_addr = addr - (unsigned long)phys_ram_base;
    dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
    if (!(dirty_flags & CODE_DIRTY_FLAG)) {
        /* page may contain translated code: flush it before the store */
#if !defined(CONFIG_USER_ONLY)
        tb_invalidate_phys_page_fast(ram_addr, 2);
        dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
#endif
    }
    stw_p((uint8_t *)(long)addr, val);
#ifdef USE_KQEMU
    if (cpu_single_env->kqemu_enabled &&
        (dirty_flags & KQEMU_MODIFY_PAGE_MASK) != KQEMU_MODIFY_PAGE_MASK)
        kqemu_modify_page(cpu_single_env, ram_addr);
#endif
    /* mark every dirty bit except CODE_DIRTY_FLAG */
    dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
    phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] = dirty_flags;
    /* we remove the notdirty callback only if the code has been
       flushed */
    if (dirty_flags == 0xff)
        tlb_set_dirty(cpu_single_env, addr, cpu_single_env->mem_write_vaddr);
}
21981ccde1cbSbellard 
/* 32-bit variant of notdirty_mem_writeb: same dirty-tracking sequence
   with a 4-byte invalidation and store.  */
static void notdirty_mem_writel(void *opaque, target_phys_addr_t addr, uint32_t val)
{
    unsigned long ram_addr;
    int dirty_flags;
    /* convert host address back to a ram offset for the dirty bitmap */
    ram_addr = addr - (unsigned long)phys_ram_base;
    dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
    if (!(dirty_flags & CODE_DIRTY_FLAG)) {
        /* page may contain translated code: flush it before the store */
#if !defined(CONFIG_USER_ONLY)
        tb_invalidate_phys_page_fast(ram_addr, 4);
        dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
#endif
    }
    stl_p((uint8_t *)(long)addr, val);
#ifdef USE_KQEMU
    if (cpu_single_env->kqemu_enabled &&
        (dirty_flags & KQEMU_MODIFY_PAGE_MASK) != KQEMU_MODIFY_PAGE_MASK)
        kqemu_modify_page(cpu_single_env, ram_addr);
#endif
    /* mark every dirty bit except CODE_DIRTY_FLAG */
    dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
    phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] = dirty_flags;
    /* we remove the notdirty callback only if the code has been
       flushed */
    if (dirty_flags == 0xff)
        tlb_set_dirty(cpu_single_env, addr, cpu_single_env->mem_write_vaddr);
}
22241ccde1cbSbellard 
/* Read table for write-intercept-only io slots (ROM, notdirty): reads
   never go through this table, so the entries stay NULL.  */
static CPUReadMemoryFunc *error_mem_read[3] = {
    NULL, /* never used */
    NULL, /* never used */
    NULL, /* never used */
};
22303a7d929eSbellard 
/* Per-width write dispatch table for the dirty-tracking io region.  */
static CPUWriteMemoryFunc *notdirty_mem_write[3] = {
    notdirty_mem_writeb,
    notdirty_mem_writew,
    notdirty_mem_writel,
};
22361ccde1cbSbellard 
22376658ffb8Spbrook #if defined(CONFIG_SOFTMMU)
22386658ffb8Spbrook /* Watchpoint access routines.  Watchpoints are inserted using TLB tricks,
22396658ffb8Spbrook    so these check for a hit then pass through to the normal out-of-line
22406658ffb8Spbrook    phys routines.  */
22416658ffb8Spbrook static uint32_t watch_mem_readb(void *opaque, target_phys_addr_t addr)
22426658ffb8Spbrook {
22436658ffb8Spbrook     return ldub_phys(addr);
22446658ffb8Spbrook }
22456658ffb8Spbrook 
22466658ffb8Spbrook static uint32_t watch_mem_readw(void *opaque, target_phys_addr_t addr)
22476658ffb8Spbrook {
22486658ffb8Spbrook     return lduw_phys(addr);
22496658ffb8Spbrook }
22506658ffb8Spbrook 
22516658ffb8Spbrook static uint32_t watch_mem_readl(void *opaque, target_phys_addr_t addr)
22526658ffb8Spbrook {
22536658ffb8Spbrook     return ldl_phys(addr);
22546658ffb8Spbrook }
22556658ffb8Spbrook 
/* Generate a debug exception if a watchpoint has been hit.
   Returns the real physical address of the access.  addr will be a host
   address in case of a RAM location.  */
static target_ulong check_watchpoint(target_phys_addr_t addr)
{
    CPUState *env = cpu_single_env;
    target_ulong watch;
    target_ulong retaddr;
    int i;

    retaddr = addr;
    for (i = 0; i < env->nb_watchpoints; i++) {
        watch = env->watchpoint[i].vaddr;
        /* page match uses the guest virtual address of the current
           write (mem_write_vaddr), not 'addr' itself */
        if (((env->mem_write_vaddr ^ watch) & TARGET_PAGE_MASK) == 0) {
            /* NOTE(review): 'addend' appears to translate the incoming
               address back into the watched address space -- confirm
               against where watchpoint[i].addend is assigned */
            retaddr = addr - env->watchpoint[i].addend;
            /* in-page offset match is done against 'addr' */
            if (((addr ^ watch) & ~TARGET_PAGE_MASK) == 0) {
                /* record which watchpoint fired (1-based) and raise a
                   debug interrupt; stop at the first hit */
                cpu_single_env->watchpoint_hit = i + 1;
                cpu_interrupt(cpu_single_env, CPU_INTERRUPT_DEBUG);
                break;
            }
        }
    }
    return retaddr;
}
22806658ffb8Spbrook 
22816658ffb8Spbrook static void watch_mem_writeb(void *opaque, target_phys_addr_t addr,
22826658ffb8Spbrook                              uint32_t val)
22836658ffb8Spbrook {
22846658ffb8Spbrook     addr = check_watchpoint(addr);
22856658ffb8Spbrook     stb_phys(addr, val);
22866658ffb8Spbrook }
22876658ffb8Spbrook 
22886658ffb8Spbrook static void watch_mem_writew(void *opaque, target_phys_addr_t addr,
22896658ffb8Spbrook                              uint32_t val)
22906658ffb8Spbrook {
22916658ffb8Spbrook     addr = check_watchpoint(addr);
22926658ffb8Spbrook     stw_phys(addr, val);
22936658ffb8Spbrook }
22946658ffb8Spbrook 
22956658ffb8Spbrook static void watch_mem_writel(void *opaque, target_phys_addr_t addr,
22966658ffb8Spbrook                              uint32_t val)
22976658ffb8Spbrook {
22986658ffb8Spbrook     addr = check_watchpoint(addr);
22996658ffb8Spbrook     stl_phys(addr, val);
23006658ffb8Spbrook }
23016658ffb8Spbrook 
/* Per-width read dispatch table for the watchpoint io region.  */
static CPUReadMemoryFunc *watch_mem_read[3] = {
    watch_mem_readb,
    watch_mem_readw,
    watch_mem_readl,
};
23076658ffb8Spbrook 
/* Per-width write dispatch table for the watchpoint io region.  */
static CPUWriteMemoryFunc *watch_mem_write[3] = {
    watch_mem_writeb,
    watch_mem_writew,
    watch_mem_writel,
};
23136658ffb8Spbrook #endif
23146658ffb8Spbrook 
/* Dispatch a read of size 'len' (0=byte, 1=word, 2=long) to the io
   handler registered for the sub-region containing 'addr'.  */
static inline uint32_t subpage_readlen (subpage_t *mmio, target_phys_addr_t addr,
                                 unsigned int len)
{
    uint32_t ret;
    unsigned int idx;

    /* index of the sub-region within the page */
    idx = SUBPAGE_IDX(addr - mmio->base);
#if defined(DEBUG_SUBPAGE)
    printf("%s: subpage %p len %d addr " TARGET_FMT_plx " idx %d\n", __func__,
           mmio, len, addr, idx);
#endif
    /* opaque[idx][0][len]: 0 selects the read-side opaque pointer */
    ret = (**mmio->mem_read[idx][len])(mmio->opaque[idx][0][len], addr);

    return ret;
}
2330db7b5426Sblueswir1 
/* Dispatch a write of size 'len' (0=byte, 1=word, 2=long) to the io
   handler registered for the sub-region containing 'addr'.  */
static inline void subpage_writelen (subpage_t *mmio, target_phys_addr_t addr,
                              uint32_t value, unsigned int len)
{
    unsigned int idx;

    /* index of the sub-region within the page */
    idx = SUBPAGE_IDX(addr - mmio->base);
#if defined(DEBUG_SUBPAGE)
    printf("%s: subpage %p len %d addr " TARGET_FMT_plx " idx %d value %08x\n", __func__,
           mmio, len, addr, idx, value);
#endif
    /* opaque[idx][1][len]: 1 selects the write-side opaque pointer */
    (**mmio->mem_write[idx][len])(mmio->opaque[idx][1][len], addr, value);
}
2343db7b5426Sblueswir1 
2344db7b5426Sblueswir1 static uint32_t subpage_readb (void *opaque, target_phys_addr_t addr)
2345db7b5426Sblueswir1 {
2346db7b5426Sblueswir1 #if defined(DEBUG_SUBPAGE)
2347db7b5426Sblueswir1     printf("%s: addr " TARGET_FMT_plx "\n", __func__, addr);
2348db7b5426Sblueswir1 #endif
2349db7b5426Sblueswir1 
2350db7b5426Sblueswir1     return subpage_readlen(opaque, addr, 0);
2351db7b5426Sblueswir1 }
2352db7b5426Sblueswir1 
2353db7b5426Sblueswir1 static void subpage_writeb (void *opaque, target_phys_addr_t addr,
2354db7b5426Sblueswir1                             uint32_t value)
2355db7b5426Sblueswir1 {
2356db7b5426Sblueswir1 #if defined(DEBUG_SUBPAGE)
2357db7b5426Sblueswir1     printf("%s: addr " TARGET_FMT_plx " val %08x\n", __func__, addr, value);
2358db7b5426Sblueswir1 #endif
2359db7b5426Sblueswir1     subpage_writelen(opaque, addr, value, 0);
2360db7b5426Sblueswir1 }
2361db7b5426Sblueswir1 
2362db7b5426Sblueswir1 static uint32_t subpage_readw (void *opaque, target_phys_addr_t addr)
2363db7b5426Sblueswir1 {
2364db7b5426Sblueswir1 #if defined(DEBUG_SUBPAGE)
2365db7b5426Sblueswir1     printf("%s: addr " TARGET_FMT_plx "\n", __func__, addr);
2366db7b5426Sblueswir1 #endif
2367db7b5426Sblueswir1 
2368db7b5426Sblueswir1     return subpage_readlen(opaque, addr, 1);
2369db7b5426Sblueswir1 }
2370db7b5426Sblueswir1 
2371db7b5426Sblueswir1 static void subpage_writew (void *opaque, target_phys_addr_t addr,
2372db7b5426Sblueswir1                             uint32_t value)
2373db7b5426Sblueswir1 {
2374db7b5426Sblueswir1 #if defined(DEBUG_SUBPAGE)
2375db7b5426Sblueswir1     printf("%s: addr " TARGET_FMT_plx " val %08x\n", __func__, addr, value);
2376db7b5426Sblueswir1 #endif
2377db7b5426Sblueswir1     subpage_writelen(opaque, addr, value, 1);
2378db7b5426Sblueswir1 }
2379db7b5426Sblueswir1 
2380db7b5426Sblueswir1 static uint32_t subpage_readl (void *opaque, target_phys_addr_t addr)
2381db7b5426Sblueswir1 {
2382db7b5426Sblueswir1 #if defined(DEBUG_SUBPAGE)
2383db7b5426Sblueswir1     printf("%s: addr " TARGET_FMT_plx "\n", __func__, addr);
2384db7b5426Sblueswir1 #endif
2385db7b5426Sblueswir1 
2386db7b5426Sblueswir1     return subpage_readlen(opaque, addr, 2);
2387db7b5426Sblueswir1 }
2388db7b5426Sblueswir1 
2389db7b5426Sblueswir1 static void subpage_writel (void *opaque,
2390db7b5426Sblueswir1                          target_phys_addr_t addr, uint32_t value)
2391db7b5426Sblueswir1 {
2392db7b5426Sblueswir1 #if defined(DEBUG_SUBPAGE)
2393db7b5426Sblueswir1     printf("%s: addr " TARGET_FMT_plx " val %08x\n", __func__, addr, value);
2394db7b5426Sblueswir1 #endif
2395db7b5426Sblueswir1     subpage_writelen(opaque, addr, value, 2);
2396db7b5426Sblueswir1 }
2397db7b5426Sblueswir1 
/* Per-width read dispatch table installed when a subpage container is
   registered as an io region.  */
static CPUReadMemoryFunc *subpage_read[] = {
    &subpage_readb,
    &subpage_readw,
    &subpage_readl,
};
2403db7b5426Sblueswir1 
/* Per-width write dispatch table installed when a subpage container is
   registered as an io region.  */
static CPUWriteMemoryFunc *subpage_write[] = {
    &subpage_writeb,
    &subpage_writew,
    &subpage_writel,
};
2409db7b5426Sblueswir1 
/* Point sub-region offsets [start, end] of 'mmio' at the io handler
   'memory' (an encoded value as returned by cpu_register_io_memory()).
   Returns 0 on success, -1 if the range exceeds the page.  */
static int subpage_register (subpage_t *mmio, uint32_t start, uint32_t end,
                             int memory)
{
    int idx, eidx;
    unsigned int i;

    if (start >= TARGET_PAGE_SIZE || end >= TARGET_PAGE_SIZE)
        return -1;
    idx = SUBPAGE_IDX(start);
    eidx = SUBPAGE_IDX(end);
#if defined(DEBUG_SUBPAGE)
    printf("%s: %p start %08x end %08x idx %08x eidx %08x mem %d\n", __func__,
           mmio, start, end, idx, eidx, memory);
#endif
    /* strip the encoding to get the raw io table index */
    memory >>= IO_MEM_SHIFT;
    for (; idx <= eidx; idx++) {
        /* copy all four access-size slots; NULL handlers are skipped so
           any handler previously registered for that size is kept */
        for (i = 0; i < 4; i++) {
            if (io_mem_read[memory][i]) {
                mmio->mem_read[idx][i] = &io_mem_read[memory][i];
                mmio->opaque[idx][0][i] = io_mem_opaque[memory];
            }
            if (io_mem_write[memory][i]) {
                mmio->mem_write[idx][i] = &io_mem_write[memory][i];
                mmio->opaque[idx][1][i] = io_mem_opaque[memory];
            }
        }
    }

    return 0;
}
2440db7b5426Sblueswir1 
/* Allocate a subpage container for the page at 'base', register it as a
   new io region, and initially forward the whole page to 'orig_memory'.
   *phys receives the new encoded io index tagged IO_MEM_SUBPAGE.  */
static void *subpage_init (target_phys_addr_t base, uint32_t *phys,
                           int orig_memory)
{
    subpage_t *mmio;
    int subpage_memory;

    mmio = qemu_mallocz(sizeof(subpage_t));
    if (mmio != NULL) {
        mmio->base = base;
        subpage_memory = cpu_register_io_memory(0, subpage_read, subpage_write, mmio);
#if defined(DEBUG_SUBPAGE)
        printf("%s: %p base " TARGET_FMT_plx " len %08x %d\n", __func__,
               mmio, base, TARGET_PAGE_SIZE, subpage_memory);
#endif
        *phys = subpage_memory | IO_MEM_SUBPAGE;
        subpage_register(mmio, 0, TARGET_PAGE_SIZE - 1, orig_memory);
    }
    /* NOTE(review): on allocation failure this returns NULL and leaves
       *phys untouched; callers do not appear to check -- verify */
    return mmio;
}
2461db7b5426Sblueswir1 
/* Register the fixed io regions (ROM, unassigned, notdirty, and the
   watchpoint region under softmmu) and allocate the dirty-page bitmap.  */
static void io_mem_init(void)
{
    cpu_register_io_memory(IO_MEM_ROM >> IO_MEM_SHIFT, error_mem_read, unassigned_mem_write, NULL);
    cpu_register_io_memory(IO_MEM_UNASSIGNED >> IO_MEM_SHIFT, unassigned_mem_read, unassigned_mem_write, NULL);
    cpu_register_io_memory(IO_MEM_NOTDIRTY >> IO_MEM_SHIFT, error_mem_read, notdirty_mem_write, NULL);
    /* dynamic io indices start after the fixed slots -- NOTE(review):
       '5' presumably covers all fixed IO_MEM_* encodings; confirm */
    io_mem_nb = 5;

#if defined(CONFIG_SOFTMMU)
    /* io_index -1 means: allocate a fresh slot */
    io_mem_watch = cpu_register_io_memory(-1, watch_mem_read,
                                          watch_mem_write, NULL);
#endif
    /* alloc dirty bits array */
    phys_ram_dirty = qemu_vmalloc(phys_ram_size >> TARGET_PAGE_BITS);
    /* initially mark every page fully dirty */
    memset(phys_ram_dirty, 0xff, phys_ram_size >> TARGET_PAGE_BITS);
}
247733417e70Sbellard 
247833417e70Sbellard /* mem_read and mem_write are arrays of functions containing the
247933417e70Sbellard    function to access byte (index 0), word (index 1) and dword (index
24803ee89922Sblueswir1    2). Functions can be omitted with a NULL function pointer. The
24813ee89922Sblueswir1    registered functions may be modified dynamically later.
24823ee89922Sblueswir1    If io_index is non zero, the corresponding io zone is
24834254fab8Sblueswir1    modified. If it is zero, a new io zone is allocated. The return
24844254fab8Sblueswir1    value can be used with cpu_register_physical_memory(). (-1) is
24854254fab8Sblueswir1    returned if error. */
248633417e70Sbellard int cpu_register_io_memory(int io_index,
248733417e70Sbellard                            CPUReadMemoryFunc **mem_read,
2488a4193c8aSbellard                            CPUWriteMemoryFunc **mem_write,
2489a4193c8aSbellard                            void *opaque)
249033417e70Sbellard {
24914254fab8Sblueswir1     int i, subwidth = 0;
249233417e70Sbellard 
249333417e70Sbellard     if (io_index <= 0) {
2494b5ff1b31Sbellard         if (io_mem_nb >= IO_MEM_NB_ENTRIES)
249533417e70Sbellard             return -1;
249633417e70Sbellard         io_index = io_mem_nb++;
249733417e70Sbellard     } else {
249833417e70Sbellard         if (io_index >= IO_MEM_NB_ENTRIES)
249933417e70Sbellard             return -1;
250033417e70Sbellard     }
250133417e70Sbellard 
250233417e70Sbellard     for(i = 0;i < 3; i++) {
25034254fab8Sblueswir1         if (!mem_read[i] || !mem_write[i])
25044254fab8Sblueswir1             subwidth = IO_MEM_SUBWIDTH;
250533417e70Sbellard         io_mem_read[io_index][i] = mem_read[i];
250633417e70Sbellard         io_mem_write[io_index][i] = mem_write[i];
250733417e70Sbellard     }
2508a4193c8aSbellard     io_mem_opaque[io_index] = opaque;
25094254fab8Sblueswir1     return (io_index << IO_MEM_SHIFT) | subwidth;
251033417e70Sbellard }
251161382a50Sbellard 
25128926b517Sbellard CPUWriteMemoryFunc **cpu_get_io_memory_write(int io_index)
25138926b517Sbellard {
25148926b517Sbellard     return io_mem_write[io_index >> IO_MEM_SHIFT];
25158926b517Sbellard }
25168926b517Sbellard 
25178926b517Sbellard CPUReadMemoryFunc **cpu_get_io_memory_read(int io_index)
25188926b517Sbellard {
25198926b517Sbellard     return io_mem_read[io_index >> IO_MEM_SHIFT];
25208926b517Sbellard }
25218926b517Sbellard 
252213eb76e0Sbellard /* physical memory access (slow version, mainly for debug) */
252313eb76e0Sbellard #if defined(CONFIG_USER_ONLY)
25242e12669aSbellard void cpu_physical_memory_rw(target_phys_addr_t addr, uint8_t *buf,
252513eb76e0Sbellard                             int len, int is_write)
252613eb76e0Sbellard {
252713eb76e0Sbellard     int l, flags;
252813eb76e0Sbellard     target_ulong page;
252953a5960aSpbrook     void * p;
253013eb76e0Sbellard 
253113eb76e0Sbellard     while (len > 0) {
253213eb76e0Sbellard         page = addr & TARGET_PAGE_MASK;
253313eb76e0Sbellard         l = (page + TARGET_PAGE_SIZE) - addr;
253413eb76e0Sbellard         if (l > len)
253513eb76e0Sbellard             l = len;
253613eb76e0Sbellard         flags = page_get_flags(page);
253713eb76e0Sbellard         if (!(flags & PAGE_VALID))
253813eb76e0Sbellard             return;
253913eb76e0Sbellard         if (is_write) {
254013eb76e0Sbellard             if (!(flags & PAGE_WRITE))
254113eb76e0Sbellard                 return;
2542579a97f7Sbellard             /* XXX: this code should not depend on lock_user */
2543579a97f7Sbellard             if (!(p = lock_user(VERIFY_WRITE, addr, len, 0)))
2544579a97f7Sbellard                 /* FIXME - should this return an error rather than just fail? */
2545579a97f7Sbellard                 return;
254653a5960aSpbrook             memcpy(p, buf, len);
254753a5960aSpbrook             unlock_user(p, addr, len);
254813eb76e0Sbellard         } else {
254913eb76e0Sbellard             if (!(flags & PAGE_READ))
255013eb76e0Sbellard                 return;
2551579a97f7Sbellard             /* XXX: this code should not depend on lock_user */
2552579a97f7Sbellard             if (!(p = lock_user(VERIFY_READ, addr, len, 1)))
2553579a97f7Sbellard                 /* FIXME - should this return an error rather than just fail? */
2554579a97f7Sbellard                 return;
255553a5960aSpbrook             memcpy(buf, p, len);
255653a5960aSpbrook             unlock_user(p, addr, 0);
255713eb76e0Sbellard         }
255813eb76e0Sbellard         len -= l;
255913eb76e0Sbellard         buf += l;
256013eb76e0Sbellard         addr += l;
256113eb76e0Sbellard     }
256213eb76e0Sbellard }
25638df1cd07Sbellard 
256413eb76e0Sbellard #else
/* Softmmu variant: copy 'len' bytes between 'buf' and guest physical
   memory at 'addr'.  Each page-sized chunk is dispatched either to the
   registered I/O callbacks or copied directly to/from host RAM, keeping
   the dirty bitmap and translated code in sync on writes. */
void cpu_physical_memory_rw(target_phys_addr_t addr, uint8_t *buf,
                            int len, int is_write)
{
    int l, io_index;
    uint8_t *ptr;
    uint32_t val;
    target_phys_addr_t page;
    unsigned long pd;
    PhysPageDesc *p;

    while (len > 0) {
        page = addr & TARGET_PAGE_MASK;
        /* clamp the chunk to the end of the current page */
        l = (page + TARGET_PAGE_SIZE) - addr;
        if (l > len)
            l = len;
        p = phys_page_find(page >> TARGET_PAGE_BITS);
        if (!p) {
            pd = IO_MEM_UNASSIGNED;
        } else {
            pd = p->phys_offset;
        }

        if (is_write) {
            if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
                /* device write: pick the widest access the alignment and
                   remaining length allow (4, 2 or 1 bytes) */
                io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
                /* XXX: could force cpu_single_env to NULL to avoid
                   potential bugs */
                if (l >= 4 && ((addr & 3) == 0)) {
                    /* 32 bit write access */
                    val = ldl_p(buf);
                    io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
                    l = 4;
                } else if (l >= 2 && ((addr & 1) == 0)) {
                    /* 16 bit write access */
                    val = lduw_p(buf);
                    io_mem_write[io_index][1](io_mem_opaque[io_index], addr, val);
                    l = 2;
                } else {
                    /* 8 bit write access */
                    val = ldub_p(buf);
                    io_mem_write[io_index][0](io_mem_opaque[io_index], addr, val);
                    l = 1;
                }
            } else {
                unsigned long addr1;
                addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
                /* RAM case */
                ptr = phys_ram_base + addr1;
                memcpy(ptr, buf, l);
                if (!cpu_physical_memory_is_dirty(addr1)) {
                    /* invalidate code */
                    tb_invalidate_phys_page_range(addr1, addr1 + l, 0);
                    /* set dirty bit; CODE_DIRTY_FLAG stays clear so the
                       code-dirty tracking still sees this page */
                    phys_ram_dirty[addr1 >> TARGET_PAGE_BITS] |=
                        (0xff & ~CODE_DIRTY_FLAG);
                }
            }
        } else {
            if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
                !(pd & IO_MEM_ROMD)) {
                /* I/O case */
                io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
                if (l >= 4 && ((addr & 3) == 0)) {
                    /* 32 bit read access */
                    val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr);
                    stl_p(buf, val);
                    l = 4;
                } else if (l >= 2 && ((addr & 1) == 0)) {
                    /* 16 bit read access */
                    val = io_mem_read[io_index][1](io_mem_opaque[io_index], addr);
                    stw_p(buf, val);
                    l = 2;
                } else {
                    /* 8 bit read access */
                    val = io_mem_read[io_index][0](io_mem_opaque[io_index], addr);
                    stb_p(buf, val);
                    l = 1;
                }
            } else {
                /* RAM case (includes ROM and ROMD-backed device pages) */
                ptr = phys_ram_base + (pd & TARGET_PAGE_MASK) +
                    (addr & ~TARGET_PAGE_MASK);
                memcpy(buf, ptr, l);
            }
        }
        len -= l;
        buf += l;
        addr += l;
    }
}
26558df1cd07Sbellard 
2656d0ecd2aaSbellard /* used for ROM loading : can write in RAM and ROM */
2657d0ecd2aaSbellard void cpu_physical_memory_write_rom(target_phys_addr_t addr,
2658d0ecd2aaSbellard                                    const uint8_t *buf, int len)
2659d0ecd2aaSbellard {
2660d0ecd2aaSbellard     int l;
2661d0ecd2aaSbellard     uint8_t *ptr;
2662d0ecd2aaSbellard     target_phys_addr_t page;
2663d0ecd2aaSbellard     unsigned long pd;
2664d0ecd2aaSbellard     PhysPageDesc *p;
2665d0ecd2aaSbellard 
2666d0ecd2aaSbellard     while (len > 0) {
2667d0ecd2aaSbellard         page = addr & TARGET_PAGE_MASK;
2668d0ecd2aaSbellard         l = (page + TARGET_PAGE_SIZE) - addr;
2669d0ecd2aaSbellard         if (l > len)
2670d0ecd2aaSbellard             l = len;
2671d0ecd2aaSbellard         p = phys_page_find(page >> TARGET_PAGE_BITS);
2672d0ecd2aaSbellard         if (!p) {
2673d0ecd2aaSbellard             pd = IO_MEM_UNASSIGNED;
2674d0ecd2aaSbellard         } else {
2675d0ecd2aaSbellard             pd = p->phys_offset;
2676d0ecd2aaSbellard         }
2677d0ecd2aaSbellard 
2678d0ecd2aaSbellard         if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM &&
26792a4188a3Sbellard             (pd & ~TARGET_PAGE_MASK) != IO_MEM_ROM &&
26802a4188a3Sbellard             !(pd & IO_MEM_ROMD)) {
2681d0ecd2aaSbellard             /* do nothing */
2682d0ecd2aaSbellard         } else {
2683d0ecd2aaSbellard             unsigned long addr1;
2684d0ecd2aaSbellard             addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
2685d0ecd2aaSbellard             /* ROM/RAM case */
2686d0ecd2aaSbellard             ptr = phys_ram_base + addr1;
2687d0ecd2aaSbellard             memcpy(ptr, buf, l);
2688d0ecd2aaSbellard         }
2689d0ecd2aaSbellard         len -= l;
2690d0ecd2aaSbellard         buf += l;
2691d0ecd2aaSbellard         addr += l;
2692d0ecd2aaSbellard     }
2693d0ecd2aaSbellard }
2694d0ecd2aaSbellard 
2695d0ecd2aaSbellard 
26968df1cd07Sbellard /* warning: addr must be aligned */
26978df1cd07Sbellard uint32_t ldl_phys(target_phys_addr_t addr)
26988df1cd07Sbellard {
26998df1cd07Sbellard     int io_index;
27008df1cd07Sbellard     uint8_t *ptr;
27018df1cd07Sbellard     uint32_t val;
27028df1cd07Sbellard     unsigned long pd;
27038df1cd07Sbellard     PhysPageDesc *p;
27048df1cd07Sbellard 
27058df1cd07Sbellard     p = phys_page_find(addr >> TARGET_PAGE_BITS);
27068df1cd07Sbellard     if (!p) {
27078df1cd07Sbellard         pd = IO_MEM_UNASSIGNED;
27088df1cd07Sbellard     } else {
27098df1cd07Sbellard         pd = p->phys_offset;
27108df1cd07Sbellard     }
27118df1cd07Sbellard 
27122a4188a3Sbellard     if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
27132a4188a3Sbellard         !(pd & IO_MEM_ROMD)) {
27148df1cd07Sbellard         /* I/O case */
27158df1cd07Sbellard         io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
27168df1cd07Sbellard         val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr);
27178df1cd07Sbellard     } else {
27188df1cd07Sbellard         /* RAM case */
27198df1cd07Sbellard         ptr = phys_ram_base + (pd & TARGET_PAGE_MASK) +
27208df1cd07Sbellard             (addr & ~TARGET_PAGE_MASK);
27218df1cd07Sbellard         val = ldl_p(ptr);
27228df1cd07Sbellard     }
27238df1cd07Sbellard     return val;
27248df1cd07Sbellard }
27258df1cd07Sbellard 
/* warning: addr must be aligned */
uint64_t ldq_phys(target_phys_addr_t addr)
{
    int io_index;
    uint8_t *ptr;
    uint64_t val;
    unsigned long pd;
    PhysPageDesc *p;

    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    if (!p) {
        pd = IO_MEM_UNASSIGNED;
    } else {
        pd = p->phys_offset;
    }

    if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
        !(pd & IO_MEM_ROMD)) {
        /* I/O case: the accessor tables only go up to 32 bits, so
           synthesize the 64 bit load from two 32 bit device reads,
           assembled according to the guest byte order */
        io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
#ifdef TARGET_WORDS_BIGENDIAN
        val = (uint64_t)io_mem_read[io_index][2](io_mem_opaque[io_index], addr) << 32;
        val |= io_mem_read[io_index][2](io_mem_opaque[io_index], addr + 4);
#else
        val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr);
        val |= (uint64_t)io_mem_read[io_index][2](io_mem_opaque[io_index], addr + 4) << 32;
#endif
    } else {
        /* RAM case */
        ptr = phys_ram_base + (pd & TARGET_PAGE_MASK) +
            (addr & ~TARGET_PAGE_MASK);
        val = ldq_p(ptr);
    }
    return val;
}
276184b7b8e7Sbellard 
2762aab33094Sbellard /* XXX: optimize */
2763aab33094Sbellard uint32_t ldub_phys(target_phys_addr_t addr)
2764aab33094Sbellard {
2765aab33094Sbellard     uint8_t val;
2766aab33094Sbellard     cpu_physical_memory_read(addr, &val, 1);
2767aab33094Sbellard     return val;
2768aab33094Sbellard }
2769aab33094Sbellard 
2770aab33094Sbellard /* XXX: optimize */
2771aab33094Sbellard uint32_t lduw_phys(target_phys_addr_t addr)
2772aab33094Sbellard {
2773aab33094Sbellard     uint16_t val;
2774aab33094Sbellard     cpu_physical_memory_read(addr, (uint8_t *)&val, 2);
2775aab33094Sbellard     return tswap16(val);
2776aab33094Sbellard }
2777aab33094Sbellard 
27788df1cd07Sbellard /* warning: addr must be aligned. The ram page is not masked as dirty
27798df1cd07Sbellard    and the code inside is not invalidated. It is useful if the dirty
27808df1cd07Sbellard    bits are used to track modified PTEs */
27818df1cd07Sbellard void stl_phys_notdirty(target_phys_addr_t addr, uint32_t val)
27828df1cd07Sbellard {
27838df1cd07Sbellard     int io_index;
27848df1cd07Sbellard     uint8_t *ptr;
27858df1cd07Sbellard     unsigned long pd;
27868df1cd07Sbellard     PhysPageDesc *p;
27878df1cd07Sbellard 
27888df1cd07Sbellard     p = phys_page_find(addr >> TARGET_PAGE_BITS);
27898df1cd07Sbellard     if (!p) {
27908df1cd07Sbellard         pd = IO_MEM_UNASSIGNED;
27918df1cd07Sbellard     } else {
27928df1cd07Sbellard         pd = p->phys_offset;
27938df1cd07Sbellard     }
27948df1cd07Sbellard 
27953a7d929eSbellard     if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
27968df1cd07Sbellard         io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
27978df1cd07Sbellard         io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
27988df1cd07Sbellard     } else {
27998df1cd07Sbellard         ptr = phys_ram_base + (pd & TARGET_PAGE_MASK) +
28008df1cd07Sbellard             (addr & ~TARGET_PAGE_MASK);
28018df1cd07Sbellard         stl_p(ptr, val);
28028df1cd07Sbellard     }
28038df1cd07Sbellard }
28048df1cd07Sbellard 
/* 64 bit variant of stl_phys_notdirty: store without updating the dirty
   bitmap or invalidating translated code. addr must be aligned. */
void stq_phys_notdirty(target_phys_addr_t addr, uint64_t val)
{
    int io_index;
    uint8_t *ptr;
    unsigned long pd;
    PhysPageDesc *p;

    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    if (!p) {
        pd = IO_MEM_UNASSIGNED;
    } else {
        pd = p->phys_offset;
    }

    if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
        io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
        /* no 64 bit accessor exists: split into two 32 bit device
           writes, ordered according to the guest byte order */
#ifdef TARGET_WORDS_BIGENDIAN
        io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val >> 32);
        io_mem_write[io_index][2](io_mem_opaque[io_index], addr + 4, val);
#else
        io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
        io_mem_write[io_index][2](io_mem_opaque[io_index], addr + 4, val >> 32);
#endif
    } else {
        /* RAM: direct host store, dirty tracking intentionally skipped */
        ptr = phys_ram_base + (pd & TARGET_PAGE_MASK) +
            (addr & ~TARGET_PAGE_MASK);
        stq_p(ptr, val);
    }
}
2834bc98a7efSj_mayer 
/* warning: addr must be aligned */
void stl_phys(target_phys_addr_t addr, uint32_t val)
{
    int io_index;
    uint8_t *ptr;
    unsigned long pd;
    PhysPageDesc *p;

    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    if (!p) {
        pd = IO_MEM_UNASSIGNED;
    } else {
        pd = p->phys_offset;
    }

    if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
        /* not RAM: dispatch to the registered 32 bit I/O write handler */
        io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
        io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
    } else {
        unsigned long addr1;
        addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
        /* RAM case */
        ptr = phys_ram_base + addr1;
        stl_p(ptr, val);
        /* the dirty check happens after the store: if the page was clean
           it may contain translated code, which must be invalidated */
        if (!cpu_physical_memory_is_dirty(addr1)) {
            /* invalidate code */
            tb_invalidate_phys_page_range(addr1, addr1 + 4, 0);
            /* set dirty bit (CODE_DIRTY_FLAG left clear on purpose) */
            phys_ram_dirty[addr1 >> TARGET_PAGE_BITS] |=
                (0xff & ~CODE_DIRTY_FLAG);
        }
    }
}
28688df1cd07Sbellard 
2869aab33094Sbellard /* XXX: optimize */
2870aab33094Sbellard void stb_phys(target_phys_addr_t addr, uint32_t val)
2871aab33094Sbellard {
2872aab33094Sbellard     uint8_t v = val;
2873aab33094Sbellard     cpu_physical_memory_write(addr, &v, 1);
2874aab33094Sbellard }
2875aab33094Sbellard 
2876aab33094Sbellard /* XXX: optimize */
2877aab33094Sbellard void stw_phys(target_phys_addr_t addr, uint32_t val)
2878aab33094Sbellard {
2879aab33094Sbellard     uint16_t v = tswap16(val);
2880aab33094Sbellard     cpu_physical_memory_write(addr, (const uint8_t *)&v, 2);
2881aab33094Sbellard }
2882aab33094Sbellard 
2883aab33094Sbellard /* XXX: optimize */
2884aab33094Sbellard void stq_phys(target_phys_addr_t addr, uint64_t val)
2885aab33094Sbellard {
2886aab33094Sbellard     val = tswap64(val);
2887aab33094Sbellard     cpu_physical_memory_write(addr, (const uint8_t *)&val, 8);
2888aab33094Sbellard }
2889aab33094Sbellard 
289013eb76e0Sbellard #endif
289113eb76e0Sbellard 
289213eb76e0Sbellard /* virtual memory access for debug */
2893b448f2f3Sbellard int cpu_memory_rw_debug(CPUState *env, target_ulong addr,
2894b448f2f3Sbellard                         uint8_t *buf, int len, int is_write)
289513eb76e0Sbellard {
289613eb76e0Sbellard     int l;
28979b3c35e0Sj_mayer     target_phys_addr_t phys_addr;
28989b3c35e0Sj_mayer     target_ulong page;
289913eb76e0Sbellard 
290013eb76e0Sbellard     while (len > 0) {
290113eb76e0Sbellard         page = addr & TARGET_PAGE_MASK;
290213eb76e0Sbellard         phys_addr = cpu_get_phys_page_debug(env, page);
290313eb76e0Sbellard         /* if no physical page mapped, return an error */
290413eb76e0Sbellard         if (phys_addr == -1)
290513eb76e0Sbellard             return -1;
290613eb76e0Sbellard         l = (page + TARGET_PAGE_SIZE) - addr;
290713eb76e0Sbellard         if (l > len)
290813eb76e0Sbellard             l = len;
2909b448f2f3Sbellard         cpu_physical_memory_rw(phys_addr + (addr & ~TARGET_PAGE_MASK),
2910b448f2f3Sbellard                                buf, l, is_write);
291113eb76e0Sbellard         len -= l;
291213eb76e0Sbellard         buf += l;
291313eb76e0Sbellard         addr += l;
291413eb76e0Sbellard     }
291513eb76e0Sbellard     return 0;
291613eb76e0Sbellard }
291713eb76e0Sbellard 
/* Print translation-block statistics (and, under CONFIG_PROFILER, the
   dyngen timing counters) to 'f' via the fprintf-like callback. */
void dump_exec_info(FILE *f,
                    int (*cpu_fprintf)(FILE *f, const char *fmt, ...))
{
    int i, target_code_size, max_target_code_size;
    int direct_jmp_count, direct_jmp2_count, cross_page;
    TranslationBlock *tb;

    target_code_size = 0;
    max_target_code_size = 0;
    cross_page = 0;
    direct_jmp_count = 0;
    direct_jmp2_count = 0;
    /* accumulate per-TB statistics over all translated blocks */
    for(i = 0; i < nb_tbs; i++) {
        tb = &tbs[i];
        target_code_size += tb->size;
        if (tb->size > max_target_code_size)
            max_target_code_size = tb->size;
        /* a second page address means the TB spans a page boundary */
        if (tb->page_addr[1] != -1)
            cross_page++;
        /* 0xffff marks "no direct jump patched" for that exit */
        if (tb->tb_next_offset[0] != 0xffff) {
            direct_jmp_count++;
            if (tb->tb_next_offset[1] != 0xffff) {
                direct_jmp2_count++;
            }
        }
    }
    /* XXX: avoid using doubles ? */
    cpu_fprintf(f, "Translation buffer state:\n");
    cpu_fprintf(f, "TB count            %d\n", nb_tbs);
    cpu_fprintf(f, "TB avg target size  %d max=%d bytes\n",
                nb_tbs ? target_code_size / nb_tbs : 0,
                max_target_code_size);
    cpu_fprintf(f, "TB avg host size    %d bytes (expansion ratio: %0.1f)\n",
                nb_tbs ? (code_gen_ptr - code_gen_buffer) / nb_tbs : 0,
                target_code_size ? (double) (code_gen_ptr - code_gen_buffer) / target_code_size : 0);
    cpu_fprintf(f, "cross page TB count %d (%d%%)\n",
            cross_page,
            nb_tbs ? (cross_page * 100) / nb_tbs : 0);
    cpu_fprintf(f, "direct jump count   %d (%d%%) (2 jumps=%d %d%%)\n",
                direct_jmp_count,
                nb_tbs ? (direct_jmp_count * 100) / nb_tbs : 0,
                direct_jmp2_count,
                nb_tbs ? (direct_jmp2_count * 100) / nb_tbs : 0);
    cpu_fprintf(f, "\nStatistics:\n");
    cpu_fprintf(f, "TB flush count      %d\n", tb_flush_count);
    cpu_fprintf(f, "TB invalidate count %d\n", tb_phys_invalidate_count);
    cpu_fprintf(f, "TLB flush count     %d\n", tlb_flush_count);
#ifdef CONFIG_PROFILER
    {
        int64_t tot;
        /* total JIT time = intermediate-op generation + host code generation */
        tot = dyngen_interm_time + dyngen_code_time;
        cpu_fprintf(f, "JIT cycles          %" PRId64 " (%0.3f s at 2.4 GHz)\n",
                    tot, tot / 2.4e9);
        cpu_fprintf(f, "translated TBs      %" PRId64 " (aborted=%" PRId64 " %0.1f%%)\n",
                    dyngen_tb_count,
                    dyngen_tb_count1 - dyngen_tb_count,
                    dyngen_tb_count1 ? (double)(dyngen_tb_count1 - dyngen_tb_count) / dyngen_tb_count1 * 100.0 : 0);
        cpu_fprintf(f, "avg ops/TB          %0.1f max=%d\n",
                    dyngen_tb_count ? (double)dyngen_op_count / dyngen_tb_count : 0, dyngen_op_count_max);
        cpu_fprintf(f, "old ops/total ops   %0.1f%%\n",
                    dyngen_op_count ? (double)dyngen_old_op_count / dyngen_op_count * 100.0 : 0);
        cpu_fprintf(f, "deleted ops/TB      %0.2f\n",
                    dyngen_tb_count ?
                    (double)dyngen_tcg_del_op_count / dyngen_tb_count : 0);
        cpu_fprintf(f, "cycles/op           %0.1f\n",
                    dyngen_op_count ? (double)tot / dyngen_op_count : 0);
        cpu_fprintf(f, "cycles/in byte     %0.1f\n",
                    dyngen_code_in_len ? (double)tot / dyngen_code_in_len : 0);
        cpu_fprintf(f, "cycles/out byte     %0.1f\n",
                    dyngen_code_out_len ? (double)tot / dyngen_code_out_len : 0);
        /* avoid division by zero in the percentage lines below */
        if (tot == 0)
            tot = 1;
        cpu_fprintf(f, "  gen_interm time   %0.1f%%\n",
                    (double)dyngen_interm_time / tot * 100.0);
        cpu_fprintf(f, "  gen_code time     %0.1f%%\n",
                    (double)dyngen_code_time / tot * 100.0);
        cpu_fprintf(f, "cpu_restore count   %" PRId64 "\n",
                    dyngen_restore_count);
        cpu_fprintf(f, "  avg cycles        %0.1f\n",
                    dyngen_restore_count ? (double)dyngen_restore_time / dyngen_restore_count : 0);
        {
            extern void dump_op_count(void);
            dump_op_count();
        }
    }
#endif
}
3005e3db7226Sbellard 
300661382a50Sbellard #if !defined(CONFIG_USER_ONLY)
300761382a50Sbellard 
300861382a50Sbellard #define MMUSUFFIX _cmmu
300961382a50Sbellard #define GETPC() NULL
301061382a50Sbellard #define env cpu_single_env
3011b769d8feSbellard #define SOFTMMU_CODE_ACCESS
301261382a50Sbellard 
301361382a50Sbellard #define SHIFT 0
301461382a50Sbellard #include "softmmu_template.h"
301561382a50Sbellard 
301661382a50Sbellard #define SHIFT 1
301761382a50Sbellard #include "softmmu_template.h"
301861382a50Sbellard 
301961382a50Sbellard #define SHIFT 2
302061382a50Sbellard #include "softmmu_template.h"
302161382a50Sbellard 
302261382a50Sbellard #define SHIFT 3
302361382a50Sbellard #include "softmmu_template.h"
302461382a50Sbellard 
302561382a50Sbellard #undef env
302661382a50Sbellard 
302761382a50Sbellard #endif
3028