154936004Sbellard /* 2fd6ce8f6Sbellard * virtual page mapping and translated block handling 354936004Sbellard * 454936004Sbellard * Copyright (c) 2003 Fabrice Bellard 554936004Sbellard * 654936004Sbellard * This library is free software; you can redistribute it and/or 754936004Sbellard * modify it under the terms of the GNU Lesser General Public 854936004Sbellard * License as published by the Free Software Foundation; either 954936004Sbellard * version 2 of the License, or (at your option) any later version. 1054936004Sbellard * 1154936004Sbellard * This library is distributed in the hope that it will be useful, 1254936004Sbellard * but WITHOUT ANY WARRANTY; without even the implied warranty of 1354936004Sbellard * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU 1454936004Sbellard * Lesser General Public License for more details. 1554936004Sbellard * 1654936004Sbellard * You should have received a copy of the GNU Lesser General Public 1754936004Sbellard * License along with this library; if not, write to the Free Software 1854936004Sbellard * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA 1954936004Sbellard */ 2067b915a5Sbellard #include "config.h" 21d5a8f07cSbellard #ifdef _WIN32 22d5a8f07cSbellard #include <windows.h> 23d5a8f07cSbellard #else 24a98d49b1Sbellard #include <sys/types.h> 25d5a8f07cSbellard #include <sys/mman.h> 26d5a8f07cSbellard #endif 2754936004Sbellard #include <stdlib.h> 2854936004Sbellard #include <stdio.h> 2954936004Sbellard #include <stdarg.h> 3054936004Sbellard #include <string.h> 3154936004Sbellard #include <errno.h> 3254936004Sbellard #include <unistd.h> 3354936004Sbellard #include <inttypes.h> 3454936004Sbellard 356180a181Sbellard #include "cpu.h" 366180a181Sbellard #include "exec-all.h" 3754936004Sbellard 38fd6ce8f6Sbellard //#define DEBUG_TB_INVALIDATE 3966e85a21Sbellard //#define DEBUG_FLUSH 409fa3e853Sbellard //#define DEBUG_TLB 41fd6ce8f6Sbellard 42fd6ce8f6Sbellard /* make various TB consistency checks */ 43fd6ce8f6Sbellard //#define DEBUG_TB_CHECK 4498857888Sbellard //#define DEBUG_TLB_CHECK 45fd6ce8f6Sbellard 46fd6ce8f6Sbellard /* threshold to flush the translated code buffer */ 47fd6ce8f6Sbellard #define CODE_GEN_BUFFER_MAX_SIZE (CODE_GEN_BUFFER_SIZE - CODE_GEN_MAX_SIZE) 48fd6ce8f6Sbellard 499fa3e853Sbellard #define SMC_BITMAP_USE_THRESHOLD 10 509fa3e853Sbellard 519fa3e853Sbellard #define MMAP_AREA_START 0x00000000 529fa3e853Sbellard #define MMAP_AREA_END 0xa8000000 53fd6ce8f6Sbellard 54fd6ce8f6Sbellard TranslationBlock tbs[CODE_GEN_MAX_BLOCKS]; 55fd6ce8f6Sbellard TranslationBlock *tb_hash[CODE_GEN_HASH_SIZE]; 569fa3e853Sbellard TranslationBlock *tb_phys_hash[CODE_GEN_PHYS_HASH_SIZE]; 57fd6ce8f6Sbellard int nb_tbs; 58eb51d102Sbellard /* any access to the tbs or the page table must use this lock */ 59eb51d102Sbellard spinlock_t tb_lock = SPIN_LOCK_UNLOCKED; 60fd6ce8f6Sbellard 61fd6ce8f6Sbellard uint8_t code_gen_buffer[CODE_GEN_BUFFER_SIZE]; 62fd6ce8f6Sbellard uint8_t *code_gen_ptr; 63fd6ce8f6Sbellard 649fa3e853Sbellard int phys_ram_size; 659fa3e853Sbellard int phys_ram_fd; 669fa3e853Sbellard uint8_t *phys_ram_base; 671ccde1cbSbellard uint8_t *phys_ram_dirty; 689fa3e853Sbellard 6954936004Sbellard typedef struct PageDesc { 7092e873b9Sbellard /* list of TBs intersecting this ram page */ 71fd6ce8f6Sbellard TranslationBlock *first_tb; 729fa3e853Sbellard /* in order to optimize self modifying code, we count the number 739fa3e853Sbellard of lookups we do to a given page to use a bitmap */ 749fa3e853Sbellard unsigned int 
code_write_count; 759fa3e853Sbellard uint8_t *code_bitmap; 769fa3e853Sbellard #if defined(CONFIG_USER_ONLY) 779fa3e853Sbellard unsigned long flags; 789fa3e853Sbellard #endif 7954936004Sbellard } PageDesc; 8054936004Sbellard 8192e873b9Sbellard typedef struct PhysPageDesc { 8292e873b9Sbellard /* offset in host memory of the page + io_index in the low 12 bits */ 8392e873b9Sbellard unsigned long phys_offset; 8492e873b9Sbellard } PhysPageDesc; 8592e873b9Sbellard 869fa3e853Sbellard typedef struct VirtPageDesc { 879fa3e853Sbellard /* physical address of code page. It is valid only if 'valid_tag' 889fa3e853Sbellard matches 'virt_valid_tag' */ 899fa3e853Sbellard target_ulong phys_addr; 909fa3e853Sbellard unsigned int valid_tag; 919fa3e853Sbellard #if !defined(CONFIG_SOFTMMU) 929fa3e853Sbellard /* original page access rights. It is valid only if 'valid_tag' 939fa3e853Sbellard matches 'virt_valid_tag' */ 949fa3e853Sbellard unsigned int prot; 959fa3e853Sbellard #endif 969fa3e853Sbellard } VirtPageDesc; 979fa3e853Sbellard 9854936004Sbellard #define L2_BITS 10 9954936004Sbellard #define L1_BITS (32 - L2_BITS - TARGET_PAGE_BITS) 10054936004Sbellard 10154936004Sbellard #define L1_SIZE (1 << L1_BITS) 10254936004Sbellard #define L2_SIZE (1 << L2_BITS) 10354936004Sbellard 10433417e70Sbellard static void io_mem_init(void); 105fd6ce8f6Sbellard 10683fb7adfSbellard unsigned long qemu_real_host_page_size; 10783fb7adfSbellard unsigned long qemu_host_page_bits; 10883fb7adfSbellard unsigned long qemu_host_page_size; 10983fb7adfSbellard unsigned long qemu_host_page_mask; 11054936004Sbellard 11192e873b9Sbellard /* XXX: for system emulation, it could just be an array */ 11254936004Sbellard static PageDesc *l1_map[L1_SIZE]; 11392e873b9Sbellard static PhysPageDesc *l1_phys_map[L1_SIZE]; 11454936004Sbellard 1159fa3e853Sbellard #if !defined(CONFIG_USER_ONLY) 1169fa3e853Sbellard static VirtPageDesc *l1_virt_map[L1_SIZE]; 1179fa3e853Sbellard static unsigned int virt_valid_tag; 1189fa3e853Sbellard #endif 1199fa3e853Sbellard 12033417e70Sbellard /* io memory support */ 12133417e70Sbellard CPUWriteMemoryFunc *io_mem_write[IO_MEM_NB_ENTRIES][4]; 12233417e70Sbellard CPUReadMemoryFunc *io_mem_read[IO_MEM_NB_ENTRIES][4]; 123a4193c8aSbellard void *io_mem_opaque[IO_MEM_NB_ENTRIES]; 12433417e70Sbellard static int io_mem_nb; 12533417e70Sbellard 12634865134Sbellard /* log support */ 12734865134Sbellard char *logfilename = "/tmp/qemu.log"; 12834865134Sbellard FILE *logfile; 12934865134Sbellard int loglevel; 13034865134Sbellard 131b346ff46Sbellard static void page_init(void) 13254936004Sbellard { 13383fb7adfSbellard /* NOTE: we can always suppose that qemu_host_page_size >= 13454936004Sbellard TARGET_PAGE_SIZE */ 13567b915a5Sbellard #ifdef _WIN32 136d5a8f07cSbellard { 137d5a8f07cSbellard SYSTEM_INFO system_info; 138d5a8f07cSbellard DWORD old_protect; 139d5a8f07cSbellard 140d5a8f07cSbellard GetSystemInfo(&system_info); 141d5a8f07cSbellard qemu_real_host_page_size = system_info.dwPageSize; 142d5a8f07cSbellard 143d5a8f07cSbellard VirtualProtect(code_gen_buffer, sizeof(code_gen_buffer), 144d5a8f07cSbellard PAGE_EXECUTE_READWRITE, &old_protect); 145d5a8f07cSbellard } 14667b915a5Sbellard #else 14783fb7adfSbellard qemu_real_host_page_size = getpagesize(); 148d5a8f07cSbellard { 149d5a8f07cSbellard unsigned long start, end; 150d5a8f07cSbellard 151d5a8f07cSbellard start = (unsigned long)code_gen_buffer; 152d5a8f07cSbellard start &= ~(qemu_real_host_page_size - 1); 153d5a8f07cSbellard 154d5a8f07cSbellard end = (unsigned long)code_gen_buffer + 
sizeof(code_gen_buffer); 155d5a8f07cSbellard end += qemu_real_host_page_size - 1; 156d5a8f07cSbellard end &= ~(qemu_real_host_page_size - 1); 157d5a8f07cSbellard 158d5a8f07cSbellard mprotect((void *)start, end - start, 159d5a8f07cSbellard PROT_READ | PROT_WRITE | PROT_EXEC); 160d5a8f07cSbellard } 16167b915a5Sbellard #endif 162d5a8f07cSbellard 16383fb7adfSbellard if (qemu_host_page_size == 0) 16483fb7adfSbellard qemu_host_page_size = qemu_real_host_page_size; 16583fb7adfSbellard if (qemu_host_page_size < TARGET_PAGE_SIZE) 16683fb7adfSbellard qemu_host_page_size = TARGET_PAGE_SIZE; 16783fb7adfSbellard qemu_host_page_bits = 0; 16883fb7adfSbellard while ((1 << qemu_host_page_bits) < qemu_host_page_size) 16983fb7adfSbellard qemu_host_page_bits++; 17083fb7adfSbellard qemu_host_page_mask = ~(qemu_host_page_size - 1); 1719fa3e853Sbellard #if !defined(CONFIG_USER_ONLY) 1729fa3e853Sbellard virt_valid_tag = 1; 1739fa3e853Sbellard #endif 17454936004Sbellard } 17554936004Sbellard 176fd6ce8f6Sbellard static inline PageDesc *page_find_alloc(unsigned int index) 17754936004Sbellard { 17854936004Sbellard PageDesc **lp, *p; 17954936004Sbellard 18054936004Sbellard lp = &l1_map[index >> L2_BITS]; 18154936004Sbellard p = *lp; 18254936004Sbellard if (!p) { 18354936004Sbellard /* allocate if not found */ 18459817ccbSbellard p = qemu_malloc(sizeof(PageDesc) * L2_SIZE); 185fd6ce8f6Sbellard memset(p, 0, sizeof(PageDesc) * L2_SIZE); 18654936004Sbellard *lp = p; 18754936004Sbellard } 18854936004Sbellard return p + (index & (L2_SIZE - 1)); 18954936004Sbellard } 19054936004Sbellard 191fd6ce8f6Sbellard static inline PageDesc *page_find(unsigned int index) 19254936004Sbellard { 19354936004Sbellard PageDesc *p; 19454936004Sbellard 19554936004Sbellard p = l1_map[index >> L2_BITS]; 19654936004Sbellard if (!p) 19754936004Sbellard return 0; 198fd6ce8f6Sbellard return p + (index & (L2_SIZE - 1)); 19954936004Sbellard } 20054936004Sbellard 20192e873b9Sbellard static inline PhysPageDesc *phys_page_find_alloc(unsigned int index) 20292e873b9Sbellard { 20392e873b9Sbellard PhysPageDesc **lp, *p; 20492e873b9Sbellard 20592e873b9Sbellard lp = &l1_phys_map[index >> L2_BITS]; 20692e873b9Sbellard p = *lp; 20792e873b9Sbellard if (!p) { 20892e873b9Sbellard /* allocate if not found */ 20992e873b9Sbellard p = qemu_malloc(sizeof(PhysPageDesc) * L2_SIZE); 21092e873b9Sbellard memset(p, 0, sizeof(PhysPageDesc) * L2_SIZE); 21192e873b9Sbellard *lp = p; 21292e873b9Sbellard } 21392e873b9Sbellard return p + (index & (L2_SIZE - 1)); 21492e873b9Sbellard } 21592e873b9Sbellard 21692e873b9Sbellard static inline PhysPageDesc *phys_page_find(unsigned int index) 21792e873b9Sbellard { 21892e873b9Sbellard PhysPageDesc *p; 21992e873b9Sbellard 22092e873b9Sbellard p = l1_phys_map[index >> L2_BITS]; 22192e873b9Sbellard if (!p) 22292e873b9Sbellard return 0; 22392e873b9Sbellard return p + (index & (L2_SIZE - 1)); 22492e873b9Sbellard } 22592e873b9Sbellard 2269fa3e853Sbellard #if !defined(CONFIG_USER_ONLY) 2274f2ac237Sbellard static void tlb_protect_code(CPUState *env, target_ulong addr); 2284f2ac237Sbellard static void tlb_unprotect_code_phys(CPUState *env, unsigned long phys_addr, target_ulong vaddr); 229fd6ce8f6Sbellard 2309fa3e853Sbellard static inline VirtPageDesc *virt_page_find_alloc(unsigned int index) 2319fa3e853Sbellard { 2329fa3e853Sbellard VirtPageDesc **lp, *p; 2339fa3e853Sbellard 2349fa3e853Sbellard lp = &l1_virt_map[index >> L2_BITS]; 2359fa3e853Sbellard p = *lp; 2369fa3e853Sbellard if (!p) { 2379fa3e853Sbellard /* allocate if not found */ 
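/* NOTE: the second level table is allocated lazily and zero initialized,
   so every new VirtPageDesc starts with valid_tag == 0. Since
   virt_valid_tag is never 0 (see page_init() and virt_page_flush()),
   such an entry is treated as invalid until tb_link() stores the
   physical address and stamps it with the current virt_valid_tag. */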
23859817ccbSbellard p = qemu_malloc(sizeof(VirtPageDesc) * L2_SIZE); 2399fa3e853Sbellard memset(p, 0, sizeof(VirtPageDesc) * L2_SIZE); 2409fa3e853Sbellard *lp = p; 2419fa3e853Sbellard } 2429fa3e853Sbellard return p + (index & (L2_SIZE - 1)); 2439fa3e853Sbellard } 2449fa3e853Sbellard 2459fa3e853Sbellard static inline VirtPageDesc *virt_page_find(unsigned int index) 2469fa3e853Sbellard { 2479fa3e853Sbellard VirtPageDesc *p; 2489fa3e853Sbellard 2499fa3e853Sbellard p = l1_virt_map[index >> L2_BITS]; 250fd6ce8f6Sbellard if (!p) 251fd6ce8f6Sbellard return 0; 2529fa3e853Sbellard return p + (index & (L2_SIZE - 1)); 253fd6ce8f6Sbellard } 254fd6ce8f6Sbellard 2559fa3e853Sbellard static void virt_page_flush(void) 25654936004Sbellard { 2579fa3e853Sbellard int i, j; 2589fa3e853Sbellard VirtPageDesc *p; 25954936004Sbellard 2609fa3e853Sbellard virt_valid_tag++; 2619fa3e853Sbellard 2629fa3e853Sbellard if (virt_valid_tag == 0) { 2639fa3e853Sbellard virt_valid_tag = 1; 2649fa3e853Sbellard for(i = 0; i < L1_SIZE; i++) { 2659fa3e853Sbellard p = l1_virt_map[i]; 2669fa3e853Sbellard if (p) { 2679fa3e853Sbellard for(j = 0; j < L2_SIZE; j++) 2689fa3e853Sbellard p[j].valid_tag = 0; 269fd6ce8f6Sbellard } 27054936004Sbellard } 27154936004Sbellard } 2729fa3e853Sbellard } 2739fa3e853Sbellard #else 2749fa3e853Sbellard static void virt_page_flush(void) 2759fa3e853Sbellard { 2769fa3e853Sbellard } 2779fa3e853Sbellard #endif 278fd6ce8f6Sbellard 279b346ff46Sbellard void cpu_exec_init(void) 280fd6ce8f6Sbellard { 281fd6ce8f6Sbellard if (!code_gen_ptr) { 282fd6ce8f6Sbellard code_gen_ptr = code_gen_buffer; 283b346ff46Sbellard page_init(); 28433417e70Sbellard io_mem_init(); 285fd6ce8f6Sbellard } 286fd6ce8f6Sbellard } 287fd6ce8f6Sbellard 2889fa3e853Sbellard static inline void invalidate_page_bitmap(PageDesc *p) 2899fa3e853Sbellard { 2909fa3e853Sbellard if (p->code_bitmap) { 29159817ccbSbellard qemu_free(p->code_bitmap); 2929fa3e853Sbellard p->code_bitmap = NULL; 2939fa3e853Sbellard } 2949fa3e853Sbellard p->code_write_count = 0; 2959fa3e853Sbellard } 2969fa3e853Sbellard 297fd6ce8f6Sbellard /* set to NULL all the 'first_tb' fields in all PageDescs */ 298fd6ce8f6Sbellard static void page_flush_tb(void) 299fd6ce8f6Sbellard { 300fd6ce8f6Sbellard int i, j; 301fd6ce8f6Sbellard PageDesc *p; 302fd6ce8f6Sbellard 303fd6ce8f6Sbellard for(i = 0; i < L1_SIZE; i++) { 304fd6ce8f6Sbellard p = l1_map[i]; 305fd6ce8f6Sbellard if (p) { 3069fa3e853Sbellard for(j = 0; j < L2_SIZE; j++) { 3079fa3e853Sbellard p->first_tb = NULL; 3089fa3e853Sbellard invalidate_page_bitmap(p); 3099fa3e853Sbellard p++; 3109fa3e853Sbellard } 311fd6ce8f6Sbellard } 312fd6ce8f6Sbellard } 313fd6ce8f6Sbellard } 314fd6ce8f6Sbellard 315fd6ce8f6Sbellard /* flush all the translation blocks */ 316d4e8164fSbellard /* XXX: tb_flush is currently not thread safe */ 3170124311eSbellard void tb_flush(CPUState *env) 318fd6ce8f6Sbellard { 3190124311eSbellard #if defined(DEBUG_FLUSH) 320fd6ce8f6Sbellard printf("qemu: flush code_size=%d nb_tbs=%d avg_tb_size=%d\n", 321fd6ce8f6Sbellard code_gen_ptr - code_gen_buffer, 322fd6ce8f6Sbellard nb_tbs, 3230124311eSbellard nb_tbs > 0 ? 
(code_gen_ptr - code_gen_buffer) / nb_tbs : 0); 324fd6ce8f6Sbellard #endif 325fd6ce8f6Sbellard nb_tbs = 0; 3268a8a608fSbellard memset (tb_hash, 0, CODE_GEN_HASH_SIZE * sizeof (void *)); 3279fa3e853Sbellard virt_page_flush(); 3289fa3e853Sbellard 3298a8a608fSbellard memset (tb_phys_hash, 0, CODE_GEN_PHYS_HASH_SIZE * sizeof (void *)); 330fd6ce8f6Sbellard page_flush_tb(); 3319fa3e853Sbellard 332fd6ce8f6Sbellard code_gen_ptr = code_gen_buffer; 333d4e8164fSbellard /* XXX: flush processor icache at this point if cache flush is 334d4e8164fSbellard expensive */ 335fd6ce8f6Sbellard } 336fd6ce8f6Sbellard 337fd6ce8f6Sbellard #ifdef DEBUG_TB_CHECK 338fd6ce8f6Sbellard 339fd6ce8f6Sbellard static void tb_invalidate_check(unsigned long address) 340fd6ce8f6Sbellard { 341fd6ce8f6Sbellard TranslationBlock *tb; 342fd6ce8f6Sbellard int i; 343fd6ce8f6Sbellard address &= TARGET_PAGE_MASK; 344fd6ce8f6Sbellard for(i = 0;i < CODE_GEN_HASH_SIZE; i++) { 345fd6ce8f6Sbellard for(tb = tb_hash[i]; tb != NULL; tb = tb->hash_next) { 346fd6ce8f6Sbellard if (!(address + TARGET_PAGE_SIZE <= tb->pc || 347fd6ce8f6Sbellard address >= tb->pc + tb->size)) { 348fd6ce8f6Sbellard printf("ERROR invalidate: address=%08lx PC=%08lx size=%04x\n", 349fd6ce8f6Sbellard address, tb->pc, tb->size); 350fd6ce8f6Sbellard } 351fd6ce8f6Sbellard } 352fd6ce8f6Sbellard } 353fd6ce8f6Sbellard } 354fd6ce8f6Sbellard 355fd6ce8f6Sbellard /* verify that all the pages have correct rights for code */ 356fd6ce8f6Sbellard static void tb_page_check(void) 357fd6ce8f6Sbellard { 358fd6ce8f6Sbellard TranslationBlock *tb; 359fd6ce8f6Sbellard int i, flags1, flags2; 360fd6ce8f6Sbellard 361fd6ce8f6Sbellard for(i = 0;i < CODE_GEN_HASH_SIZE; i++) { 362fd6ce8f6Sbellard for(tb = tb_hash[i]; tb != NULL; tb = tb->hash_next) { 363fd6ce8f6Sbellard flags1 = page_get_flags(tb->pc); 364fd6ce8f6Sbellard flags2 = page_get_flags(tb->pc + tb->size - 1); 365fd6ce8f6Sbellard if ((flags1 & PAGE_WRITE) || (flags2 & PAGE_WRITE)) { 366fd6ce8f6Sbellard printf("ERROR page flags: PC=%08lx size=%04x f1=%x f2=%x\n", 367fd6ce8f6Sbellard tb->pc, tb->size, flags1, flags2); 368fd6ce8f6Sbellard } 369fd6ce8f6Sbellard } 370fd6ce8f6Sbellard } 371fd6ce8f6Sbellard } 372fd6ce8f6Sbellard 373d4e8164fSbellard void tb_jmp_check(TranslationBlock *tb) 374d4e8164fSbellard { 375d4e8164fSbellard TranslationBlock *tb1; 376d4e8164fSbellard unsigned int n1; 377d4e8164fSbellard 378d4e8164fSbellard /* suppress any remaining jumps to this TB */ 379d4e8164fSbellard tb1 = tb->jmp_first; 380d4e8164fSbellard for(;;) { 381d4e8164fSbellard n1 = (long)tb1 & 3; 382d4e8164fSbellard tb1 = (TranslationBlock *)((long)tb1 & ~3); 383d4e8164fSbellard if (n1 == 2) 384d4e8164fSbellard break; 385d4e8164fSbellard tb1 = tb1->jmp_next[n1]; 386d4e8164fSbellard } 387d4e8164fSbellard /* check end of list */ 388d4e8164fSbellard if (tb1 != tb) { 389d4e8164fSbellard printf("ERROR: jmp_list from 0x%08lx\n", (long)tb); 390d4e8164fSbellard } 391d4e8164fSbellard } 392d4e8164fSbellard 393fd6ce8f6Sbellard #endif 394fd6ce8f6Sbellard 395fd6ce8f6Sbellard /* invalidate one TB */ 396fd6ce8f6Sbellard static inline void tb_remove(TranslationBlock **ptb, TranslationBlock *tb, 397fd6ce8f6Sbellard int next_offset) 398fd6ce8f6Sbellard { 399fd6ce8f6Sbellard TranslationBlock *tb1; 400fd6ce8f6Sbellard for(;;) { 401fd6ce8f6Sbellard tb1 = *ptb; 402fd6ce8f6Sbellard if (tb1 == tb) { 403fd6ce8f6Sbellard *ptb = *(TranslationBlock **)((char *)tb1 + next_offset); 404fd6ce8f6Sbellard break; 405fd6ce8f6Sbellard } 406fd6ce8f6Sbellard ptb = (TranslationBlock **)((char *)tb1 + 
next_offset); 407fd6ce8f6Sbellard } 408fd6ce8f6Sbellard } 409fd6ce8f6Sbellard 4109fa3e853Sbellard static inline void tb_page_remove(TranslationBlock **ptb, TranslationBlock *tb) 4119fa3e853Sbellard { 4129fa3e853Sbellard TranslationBlock *tb1; 4139fa3e853Sbellard unsigned int n1; 4149fa3e853Sbellard 4159fa3e853Sbellard for(;;) { 4169fa3e853Sbellard tb1 = *ptb; 4179fa3e853Sbellard n1 = (long)tb1 & 3; 4189fa3e853Sbellard tb1 = (TranslationBlock *)((long)tb1 & ~3); 4199fa3e853Sbellard if (tb1 == tb) { 4209fa3e853Sbellard *ptb = tb1->page_next[n1]; 4219fa3e853Sbellard break; 4229fa3e853Sbellard } 4239fa3e853Sbellard ptb = &tb1->page_next[n1]; 4249fa3e853Sbellard } 4259fa3e853Sbellard } 4269fa3e853Sbellard 427d4e8164fSbellard static inline void tb_jmp_remove(TranslationBlock *tb, int n) 428d4e8164fSbellard { 429d4e8164fSbellard TranslationBlock *tb1, **ptb; 430d4e8164fSbellard unsigned int n1; 431d4e8164fSbellard 432d4e8164fSbellard ptb = &tb->jmp_next[n]; 433d4e8164fSbellard tb1 = *ptb; 434d4e8164fSbellard if (tb1) { 435d4e8164fSbellard /* find tb(n) in circular list */ 436d4e8164fSbellard for(;;) { 437d4e8164fSbellard tb1 = *ptb; 438d4e8164fSbellard n1 = (long)tb1 & 3; 439d4e8164fSbellard tb1 = (TranslationBlock *)((long)tb1 & ~3); 440d4e8164fSbellard if (n1 == n && tb1 == tb) 441d4e8164fSbellard break; 442d4e8164fSbellard if (n1 == 2) { 443d4e8164fSbellard ptb = &tb1->jmp_first; 444d4e8164fSbellard } else { 445d4e8164fSbellard ptb = &tb1->jmp_next[n1]; 446d4e8164fSbellard } 447d4e8164fSbellard } 448d4e8164fSbellard /* now we can suppress tb(n) from the list */ 449d4e8164fSbellard *ptb = tb->jmp_next[n]; 450d4e8164fSbellard 451d4e8164fSbellard tb->jmp_next[n] = NULL; 452d4e8164fSbellard } 453d4e8164fSbellard } 454d4e8164fSbellard 455d4e8164fSbellard /* reset the jump entry 'n' of a TB so that it is not chained to 456d4e8164fSbellard another TB */ 457d4e8164fSbellard static inline void tb_reset_jump(TranslationBlock *tb, int n) 458d4e8164fSbellard { 459d4e8164fSbellard tb_set_jmp_target(tb, n, (unsigned long)(tb->tc_ptr + tb->tb_next_offset[n])); 460d4e8164fSbellard } 461d4e8164fSbellard 4629fa3e853Sbellard static inline void tb_invalidate(TranslationBlock *tb) 463fd6ce8f6Sbellard { 464d4e8164fSbellard unsigned int h, n1; 4659fa3e853Sbellard TranslationBlock *tb1, *tb2, **ptb; 466fd6ce8f6Sbellard 46736bdbe54Sbellard tb_invalidated_flag = 1; 46836bdbe54Sbellard 469fd6ce8f6Sbellard /* remove the TB from the hash list */ 470fd6ce8f6Sbellard h = tb_hash_func(tb->pc); 4719fa3e853Sbellard ptb = &tb_hash[h]; 4729fa3e853Sbellard for(;;) { 4739fa3e853Sbellard tb1 = *ptb; 4749fa3e853Sbellard /* NOTE: the TB is not necessarily linked in the hash. 
It 4759fa3e853Sbellard indicates that it is not currently used */ 4769fa3e853Sbellard if (tb1 == NULL) 4779fa3e853Sbellard return; 4789fa3e853Sbellard if (tb1 == tb) { 4799fa3e853Sbellard *ptb = tb1->hash_next; 4809fa3e853Sbellard break; 481fd6ce8f6Sbellard } 4829fa3e853Sbellard ptb = &tb1->hash_next; 483fd6ce8f6Sbellard } 484d4e8164fSbellard 485d4e8164fSbellard /* suppress this TB from the two jump lists */ 486d4e8164fSbellard tb_jmp_remove(tb, 0); 487d4e8164fSbellard tb_jmp_remove(tb, 1); 488d4e8164fSbellard 489d4e8164fSbellard /* suppress any remaining jumps to this TB */ 490d4e8164fSbellard tb1 = tb->jmp_first; 491d4e8164fSbellard for(;;) { 492d4e8164fSbellard n1 = (long)tb1 & 3; 493d4e8164fSbellard if (n1 == 2) 494d4e8164fSbellard break; 495d4e8164fSbellard tb1 = (TranslationBlock *)((long)tb1 & ~3); 496d4e8164fSbellard tb2 = tb1->jmp_next[n1]; 497d4e8164fSbellard tb_reset_jump(tb1, n1); 498d4e8164fSbellard tb1->jmp_next[n1] = NULL; 499d4e8164fSbellard tb1 = tb2; 500d4e8164fSbellard } 501d4e8164fSbellard tb->jmp_first = (TranslationBlock *)((long)tb | 2); /* fail safe */ 502fd6ce8f6Sbellard } 503fd6ce8f6Sbellard 5049fa3e853Sbellard static inline void tb_phys_invalidate(TranslationBlock *tb, unsigned int page_addr) 505fd6ce8f6Sbellard { 506fd6ce8f6Sbellard PageDesc *p; 5079fa3e853Sbellard unsigned int h; 5089fa3e853Sbellard target_ulong phys_pc; 509fd6ce8f6Sbellard 5109fa3e853Sbellard /* remove the TB from the hash list */ 5119fa3e853Sbellard phys_pc = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK); 5129fa3e853Sbellard h = tb_phys_hash_func(phys_pc); 5139fa3e853Sbellard tb_remove(&tb_phys_hash[h], tb, 5149fa3e853Sbellard offsetof(TranslationBlock, phys_hash_next)); 5159fa3e853Sbellard 5169fa3e853Sbellard /* remove the TB from the page list */ 5179fa3e853Sbellard if (tb->page_addr[0] != page_addr) { 5189fa3e853Sbellard p = page_find(tb->page_addr[0] >> TARGET_PAGE_BITS); 5199fa3e853Sbellard tb_page_remove(&p->first_tb, tb); 5209fa3e853Sbellard invalidate_page_bitmap(p); 5219fa3e853Sbellard } 5229fa3e853Sbellard if (tb->page_addr[1] != -1 && tb->page_addr[1] != page_addr) { 5239fa3e853Sbellard p = page_find(tb->page_addr[1] >> TARGET_PAGE_BITS); 5249fa3e853Sbellard tb_page_remove(&p->first_tb, tb); 5259fa3e853Sbellard invalidate_page_bitmap(p); 5269fa3e853Sbellard } 5279fa3e853Sbellard 5289fa3e853Sbellard tb_invalidate(tb); 5299fa3e853Sbellard } 5309fa3e853Sbellard 5319fa3e853Sbellard static inline void set_bits(uint8_t *tab, int start, int len) 5329fa3e853Sbellard { 5339fa3e853Sbellard int end, mask, end1; 5349fa3e853Sbellard 5359fa3e853Sbellard end = start + len; 5369fa3e853Sbellard tab += start >> 3; 5379fa3e853Sbellard mask = 0xff << (start & 7); 5389fa3e853Sbellard if ((start & ~7) == (end & ~7)) { 5399fa3e853Sbellard if (start < end) { 5409fa3e853Sbellard mask &= ~(0xff << (end & 7)); 5419fa3e853Sbellard *tab |= mask; 5429fa3e853Sbellard } 5439fa3e853Sbellard } else { 5449fa3e853Sbellard *tab++ |= mask; 5459fa3e853Sbellard start = (start + 8) & ~7; 5469fa3e853Sbellard end1 = end & ~7; 5479fa3e853Sbellard while (start < end1) { 5489fa3e853Sbellard *tab++ = 0xff; 5499fa3e853Sbellard start += 8; 5509fa3e853Sbellard } 5519fa3e853Sbellard if (start < end) { 5529fa3e853Sbellard mask = ~(0xff << (end & 7)); 5539fa3e853Sbellard *tab |= mask; 5549fa3e853Sbellard } 5559fa3e853Sbellard } 5569fa3e853Sbellard } 5579fa3e853Sbellard 5589fa3e853Sbellard static void build_page_bitmap(PageDesc *p) 5599fa3e853Sbellard { 5609fa3e853Sbellard int n, tb_start, tb_end; 5619fa3e853Sbellard 
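/* NOTE: the bitmap built here has one bit per byte of the target page.
   set_bits() marks the byte ranges covered by each TB in this page, so
   that tb_invalidate_phys_page_fast() can test a write of at most 8
   bytes with a single lookup and skip the slower
   tb_invalidate_phys_page_range() path when the write does not touch
   translated code. The bitmap is only built once a page has seen
   SMC_BITMAP_USE_THRESHOLD write accesses (see
   tb_invalidate_phys_page_range()). */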
TranslationBlock *tb; 5629fa3e853Sbellard 56359817ccbSbellard p->code_bitmap = qemu_malloc(TARGET_PAGE_SIZE / 8); 5649fa3e853Sbellard if (!p->code_bitmap) 5659fa3e853Sbellard return; 5669fa3e853Sbellard memset(p->code_bitmap, 0, TARGET_PAGE_SIZE / 8); 5679fa3e853Sbellard 5689fa3e853Sbellard tb = p->first_tb; 5699fa3e853Sbellard while (tb != NULL) { 5709fa3e853Sbellard n = (long)tb & 3; 5719fa3e853Sbellard tb = (TranslationBlock *)((long)tb & ~3); 5729fa3e853Sbellard /* NOTE: this is subtle as a TB may span two physical pages */ 5739fa3e853Sbellard if (n == 0) { 5749fa3e853Sbellard /* NOTE: tb_end may be after the end of the page, but 5759fa3e853Sbellard it is not a problem */ 5769fa3e853Sbellard tb_start = tb->pc & ~TARGET_PAGE_MASK; 5779fa3e853Sbellard tb_end = tb_start + tb->size; 5789fa3e853Sbellard if (tb_end > TARGET_PAGE_SIZE) 5799fa3e853Sbellard tb_end = TARGET_PAGE_SIZE; 5809fa3e853Sbellard } else { 5819fa3e853Sbellard tb_start = 0; 5829fa3e853Sbellard tb_end = ((tb->pc + tb->size) & ~TARGET_PAGE_MASK); 5839fa3e853Sbellard } 5849fa3e853Sbellard set_bits(p->code_bitmap, tb_start, tb_end - tb_start); 5859fa3e853Sbellard tb = tb->page_next[n]; 5869fa3e853Sbellard } 5879fa3e853Sbellard } 5889fa3e853Sbellard 589d720b93dSbellard #ifdef TARGET_HAS_PRECISE_SMC 590d720b93dSbellard 591d720b93dSbellard static void tb_gen_code(CPUState *env, 592d720b93dSbellard target_ulong pc, target_ulong cs_base, int flags, 593d720b93dSbellard int cflags) 594d720b93dSbellard { 595d720b93dSbellard TranslationBlock *tb; 596d720b93dSbellard uint8_t *tc_ptr; 597d720b93dSbellard target_ulong phys_pc, phys_page2, virt_page2; 598d720b93dSbellard int code_gen_size; 599d720b93dSbellard 600d720b93dSbellard phys_pc = get_phys_addr_code(env, (unsigned long)pc); 601d720b93dSbellard tb = tb_alloc((unsigned long)pc); 602d720b93dSbellard if (!tb) { 603d720b93dSbellard /* flush must be done */ 604d720b93dSbellard tb_flush(env); 605d720b93dSbellard /* cannot fail at this point */ 606d720b93dSbellard tb = tb_alloc((unsigned long)pc); 607d720b93dSbellard } 608d720b93dSbellard tc_ptr = code_gen_ptr; 609d720b93dSbellard tb->tc_ptr = tc_ptr; 610d720b93dSbellard tb->cs_base = cs_base; 611d720b93dSbellard tb->flags = flags; 612d720b93dSbellard tb->cflags = cflags; 613d720b93dSbellard cpu_gen_code(env, tb, CODE_GEN_MAX_SIZE, &code_gen_size); 614d720b93dSbellard code_gen_ptr = (void *)(((unsigned long)code_gen_ptr + code_gen_size + CODE_GEN_ALIGN - 1) & ~(CODE_GEN_ALIGN - 1)); 615d720b93dSbellard 616d720b93dSbellard /* check next page if needed */ 617d720b93dSbellard virt_page2 = ((unsigned long)pc + tb->size - 1) & TARGET_PAGE_MASK; 618d720b93dSbellard phys_page2 = -1; 619d720b93dSbellard if (((unsigned long)pc & TARGET_PAGE_MASK) != virt_page2) { 620d720b93dSbellard phys_page2 = get_phys_addr_code(env, virt_page2); 621d720b93dSbellard } 622d720b93dSbellard tb_link_phys(tb, phys_pc, phys_page2); 623d720b93dSbellard } 624d720b93dSbellard #endif 625d720b93dSbellard 6269fa3e853Sbellard /* invalidate all TBs which intersect with the target physical page 6279fa3e853Sbellard starting in range [start;end[. NOTE: start and end must refer to 628d720b93dSbellard the same physical page. 'is_cpu_write_access' should be true if called 629d720b93dSbellard from a real cpu write access: the virtual CPU will exit the current 630d720b93dSbellard TB if code is modified inside this TB. 
*/ 631d720b93dSbellard void tb_invalidate_phys_page_range(target_ulong start, target_ulong end, 632d720b93dSbellard int is_cpu_write_access) 6339fa3e853Sbellard { 634d720b93dSbellard int n, current_tb_modified, current_tb_not_found, current_flags; 635d720b93dSbellard CPUState *env = cpu_single_env; 6369fa3e853Sbellard PageDesc *p; 637ea1c1802Sbellard TranslationBlock *tb, *tb_next, *current_tb, *saved_tb; 6389fa3e853Sbellard target_ulong tb_start, tb_end; 639d720b93dSbellard target_ulong current_pc, current_cs_base; 6409fa3e853Sbellard 6419fa3e853Sbellard p = page_find(start >> TARGET_PAGE_BITS); 6429fa3e853Sbellard if (!p) 6439fa3e853Sbellard return; 6449fa3e853Sbellard if (!p->code_bitmap && 645d720b93dSbellard ++p->code_write_count >= SMC_BITMAP_USE_THRESHOLD && 646d720b93dSbellard is_cpu_write_access) { 6479fa3e853Sbellard /* build code bitmap */ 6489fa3e853Sbellard build_page_bitmap(p); 6499fa3e853Sbellard } 6509fa3e853Sbellard 6519fa3e853Sbellard /* we remove all the TBs in the range [start, end[ */ 6529fa3e853Sbellard /* XXX: see if in some cases it could be faster to invalidate all the code */ 653d720b93dSbellard current_tb_not_found = is_cpu_write_access; 654d720b93dSbellard current_tb_modified = 0; 655d720b93dSbellard current_tb = NULL; /* avoid warning */ 656d720b93dSbellard current_pc = 0; /* avoid warning */ 657d720b93dSbellard current_cs_base = 0; /* avoid warning */ 658d720b93dSbellard current_flags = 0; /* avoid warning */ 6599fa3e853Sbellard tb = p->first_tb; 6609fa3e853Sbellard while (tb != NULL) { 6619fa3e853Sbellard n = (long)tb & 3; 6629fa3e853Sbellard tb = (TranslationBlock *)((long)tb & ~3); 6639fa3e853Sbellard tb_next = tb->page_next[n]; 6649fa3e853Sbellard /* NOTE: this is subtle as a TB may span two physical pages */ 6659fa3e853Sbellard if (n == 0) { 6669fa3e853Sbellard /* NOTE: tb_end may be after the end of the page, but 6679fa3e853Sbellard it is not a problem */ 6689fa3e853Sbellard tb_start = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK); 6699fa3e853Sbellard tb_end = tb_start + tb->size; 6709fa3e853Sbellard } else { 6719fa3e853Sbellard tb_start = tb->page_addr[1]; 6729fa3e853Sbellard tb_end = tb_start + ((tb->pc + tb->size) & ~TARGET_PAGE_MASK); 6739fa3e853Sbellard } 6749fa3e853Sbellard if (!(tb_end <= start || tb_start >= end)) { 675d720b93dSbellard #ifdef TARGET_HAS_PRECISE_SMC 676d720b93dSbellard if (current_tb_not_found) { 677d720b93dSbellard current_tb_not_found = 0; 678d720b93dSbellard current_tb = NULL; 679d720b93dSbellard if (env->mem_write_pc) { 680d720b93dSbellard /* now we have a real cpu fault */ 681d720b93dSbellard current_tb = tb_find_pc(env->mem_write_pc); 682d720b93dSbellard } 683d720b93dSbellard } 684d720b93dSbellard if (current_tb == tb && 685d720b93dSbellard !(current_tb->cflags & CF_SINGLE_INSN)) { 686d720b93dSbellard /* If we are modifying the current TB, we must stop 687d720b93dSbellard its execution. 
We could be more precise by checking 688d720b93dSbellard that the modification is after the current PC, but it 689d720b93dSbellard would require a specialized function to partially 690d720b93dSbellard restore the CPU state */ 691d720b93dSbellard 692d720b93dSbellard current_tb_modified = 1; 693d720b93dSbellard cpu_restore_state(current_tb, env, 694d720b93dSbellard env->mem_write_pc, NULL); 695d720b93dSbellard #if defined(TARGET_I386) 696d720b93dSbellard current_flags = env->hflags; 697d720b93dSbellard current_flags |= (env->eflags & (IOPL_MASK | TF_MASK | VM_MASK)); 698d720b93dSbellard current_cs_base = (target_ulong)env->segs[R_CS].base; 699d720b93dSbellard current_pc = current_cs_base + env->eip; 700d720b93dSbellard #else 701d720b93dSbellard #error unsupported CPU 702d720b93dSbellard #endif 703d720b93dSbellard } 704d720b93dSbellard #endif /* TARGET_HAS_PRECISE_SMC */ 705ea1c1802Sbellard saved_tb = env->current_tb; 706ea1c1802Sbellard env->current_tb = NULL; 7079fa3e853Sbellard tb_phys_invalidate(tb, -1); 708ea1c1802Sbellard env->current_tb = saved_tb; 709ea1c1802Sbellard if (env->interrupt_request && env->current_tb) 710ea1c1802Sbellard cpu_interrupt(env, env->interrupt_request); 7119fa3e853Sbellard } 7129fa3e853Sbellard tb = tb_next; 7139fa3e853Sbellard } 7149fa3e853Sbellard #if !defined(CONFIG_USER_ONLY) 7159fa3e853Sbellard /* if no code remaining, no need to continue to use slow writes */ 7169fa3e853Sbellard if (!p->first_tb) { 7179fa3e853Sbellard invalidate_page_bitmap(p); 718d720b93dSbellard if (is_cpu_write_access) { 719d720b93dSbellard tlb_unprotect_code_phys(env, start, env->mem_write_vaddr); 720d720b93dSbellard } 721d720b93dSbellard } 722d720b93dSbellard #endif 723d720b93dSbellard #ifdef TARGET_HAS_PRECISE_SMC 724d720b93dSbellard if (current_tb_modified) { 725d720b93dSbellard /* we generate a block containing just the instruction 726d720b93dSbellard modifying the memory. 
It will ensure that it cannot modify 727d720b93dSbellard itself */ 728ea1c1802Sbellard env->current_tb = NULL; 729d720b93dSbellard tb_gen_code(env, current_pc, current_cs_base, current_flags, 730d720b93dSbellard CF_SINGLE_INSN); 731d720b93dSbellard cpu_resume_from_signal(env, NULL); 7329fa3e853Sbellard } 7339fa3e853Sbellard #endif 7349fa3e853Sbellard } 7359fa3e853Sbellard 7369fa3e853Sbellard /* len must be <= 8 and start must be a multiple of len */ 737d720b93dSbellard static inline void tb_invalidate_phys_page_fast(target_ulong start, int len) 7389fa3e853Sbellard { 7399fa3e853Sbellard PageDesc *p; 7409fa3e853Sbellard int offset, b; 74159817ccbSbellard #if 0 742a4193c8aSbellard if (1) { 743a4193c8aSbellard if (loglevel) { 744a4193c8aSbellard fprintf(logfile, "modifying code at 0x%x size=%d EIP=%x PC=%08x\n", 745a4193c8aSbellard cpu_single_env->mem_write_vaddr, len, 746a4193c8aSbellard cpu_single_env->eip, 747a4193c8aSbellard cpu_single_env->eip + (long)cpu_single_env->segs[R_CS].base); 748a4193c8aSbellard } 74959817ccbSbellard } 75059817ccbSbellard #endif 7519fa3e853Sbellard p = page_find(start >> TARGET_PAGE_BITS); 7529fa3e853Sbellard if (!p) 7539fa3e853Sbellard return; 7549fa3e853Sbellard if (p->code_bitmap) { 7559fa3e853Sbellard offset = start & ~TARGET_PAGE_MASK; 7569fa3e853Sbellard b = p->code_bitmap[offset >> 3] >> (offset & 7); 7579fa3e853Sbellard if (b & ((1 << len) - 1)) 7589fa3e853Sbellard goto do_invalidate; 7599fa3e853Sbellard } else { 7609fa3e853Sbellard do_invalidate: 761d720b93dSbellard tb_invalidate_phys_page_range(start, start + len, 1); 7629fa3e853Sbellard } 7639fa3e853Sbellard } 7649fa3e853Sbellard 7659fa3e853Sbellard #if !defined(CONFIG_SOFTMMU) 766d720b93dSbellard static void tb_invalidate_phys_page(target_ulong addr, 767d720b93dSbellard unsigned long pc, void *puc) 7689fa3e853Sbellard { 769d720b93dSbellard int n, current_flags, current_tb_modified; 770d720b93dSbellard target_ulong current_pc, current_cs_base; 7719fa3e853Sbellard PageDesc *p; 772d720b93dSbellard TranslationBlock *tb, *current_tb; 773d720b93dSbellard #ifdef TARGET_HAS_PRECISE_SMC 774d720b93dSbellard CPUState *env = cpu_single_env; 775d720b93dSbellard #endif 7769fa3e853Sbellard 7779fa3e853Sbellard addr &= TARGET_PAGE_MASK; 7789fa3e853Sbellard p = page_find(addr >> TARGET_PAGE_BITS); 779fd6ce8f6Sbellard if (!p) 780fd6ce8f6Sbellard return; 781fd6ce8f6Sbellard tb = p->first_tb; 782d720b93dSbellard current_tb_modified = 0; 783d720b93dSbellard current_tb = NULL; 784d720b93dSbellard current_pc = 0; /* avoid warning */ 785d720b93dSbellard current_cs_base = 0; /* avoid warning */ 786d720b93dSbellard current_flags = 0; /* avoid warning */ 787d720b93dSbellard #ifdef TARGET_HAS_PRECISE_SMC 788d720b93dSbellard if (tb && pc != 0) { 789d720b93dSbellard current_tb = tb_find_pc(pc); 790d720b93dSbellard } 791d720b93dSbellard #endif 792fd6ce8f6Sbellard while (tb != NULL) { 7939fa3e853Sbellard n = (long)tb & 3; 7949fa3e853Sbellard tb = (TranslationBlock *)((long)tb & ~3); 795d720b93dSbellard #ifdef TARGET_HAS_PRECISE_SMC 796d720b93dSbellard if (current_tb == tb && 797d720b93dSbellard !(current_tb->cflags & CF_SINGLE_INSN)) { 798d720b93dSbellard /* If we are modifying the current TB, we must stop 799d720b93dSbellard its execution. 
We could be more precise by checking 800d720b93dSbellard that the modification is after the current PC, but it 801d720b93dSbellard would require a specialized function to partially 802d720b93dSbellard restore the CPU state */ 803d720b93dSbellard 804d720b93dSbellard current_tb_modified = 1; 805d720b93dSbellard cpu_restore_state(current_tb, env, pc, puc); 806d720b93dSbellard #if defined(TARGET_I386) 807d720b93dSbellard current_flags = env->hflags; 808d720b93dSbellard current_flags |= (env->eflags & (IOPL_MASK | TF_MASK | VM_MASK)); 809d720b93dSbellard current_cs_base = (target_ulong)env->segs[R_CS].base; 810d720b93dSbellard current_pc = current_cs_base + env->eip; 811d720b93dSbellard #else 812d720b93dSbellard #error unsupported CPU 813d720b93dSbellard #endif 814d720b93dSbellard } 815d720b93dSbellard #endif /* TARGET_HAS_PRECISE_SMC */ 8169fa3e853Sbellard tb_phys_invalidate(tb, addr); 8179fa3e853Sbellard tb = tb->page_next[n]; 818fd6ce8f6Sbellard } 819fd6ce8f6Sbellard p->first_tb = NULL; 820d720b93dSbellard #ifdef TARGET_HAS_PRECISE_SMC 821d720b93dSbellard if (current_tb_modified) { 822d720b93dSbellard /* we generate a block containing just the instruction 823d720b93dSbellard modifying the memory. It will ensure that it cannot modify 824d720b93dSbellard itself */ 825ea1c1802Sbellard env->current_tb = NULL; 826d720b93dSbellard tb_gen_code(env, current_pc, current_cs_base, current_flags, 827d720b93dSbellard CF_SINGLE_INSN); 828d720b93dSbellard cpu_resume_from_signal(env, puc); 829d720b93dSbellard } 830d720b93dSbellard #endif 831fd6ce8f6Sbellard } 8329fa3e853Sbellard #endif 833fd6ce8f6Sbellard 834fd6ce8f6Sbellard /* add the tb in the target page and protect it if necessary */ 8359fa3e853Sbellard static inline void tb_alloc_page(TranslationBlock *tb, 8369fa3e853Sbellard unsigned int n, unsigned int page_addr) 837fd6ce8f6Sbellard { 838fd6ce8f6Sbellard PageDesc *p; 8399fa3e853Sbellard TranslationBlock *last_first_tb; 8409fa3e853Sbellard 8419fa3e853Sbellard tb->page_addr[n] = page_addr; 8429fa3e853Sbellard p = page_find(page_addr >> TARGET_PAGE_BITS); 8439fa3e853Sbellard tb->page_next[n] = p->first_tb; 8449fa3e853Sbellard last_first_tb = p->first_tb; 8459fa3e853Sbellard p->first_tb = (TranslationBlock *)((long)tb | n); 8469fa3e853Sbellard invalidate_page_bitmap(p); 8479fa3e853Sbellard 848107db443Sbellard #if defined(TARGET_HAS_SMC) || 1 849d720b93dSbellard 8509fa3e853Sbellard #if defined(CONFIG_USER_ONLY) 8519fa3e853Sbellard if (p->flags & PAGE_WRITE) { 8529fa3e853Sbellard unsigned long host_start, host_end, addr; 853fd6ce8f6Sbellard int prot; 854fd6ce8f6Sbellard 855fd6ce8f6Sbellard /* force the host page as non writable (writes will have a 856fd6ce8f6Sbellard page fault + mprotect overhead) */ 85783fb7adfSbellard host_start = page_addr & qemu_host_page_mask; 85883fb7adfSbellard host_end = host_start + qemu_host_page_size; 859fd6ce8f6Sbellard prot = 0; 860fd6ce8f6Sbellard for(addr = host_start; addr < host_end; addr += TARGET_PAGE_SIZE) 861fd6ce8f6Sbellard prot |= page_get_flags(addr); 86283fb7adfSbellard mprotect((void *)host_start, qemu_host_page_size, 863fd6ce8f6Sbellard (prot & PAGE_BITS) & ~PAGE_WRITE); 864fd6ce8f6Sbellard #ifdef DEBUG_TB_INVALIDATE 865fd6ce8f6Sbellard printf("protecting code page: 0x%08lx\n", 866fd6ce8f6Sbellard host_start); 867fd6ce8f6Sbellard #endif 868fd6ce8f6Sbellard p->flags &= ~PAGE_WRITE; 869fd6ce8f6Sbellard } 8709fa3e853Sbellard #else 8719fa3e853Sbellard /* if some code is already present, then the pages are already 8729fa3e853Sbellard protected. 
So we handle the case where only the first TB is 8739fa3e853Sbellard allocated in a physical page */ 8749fa3e853Sbellard if (!last_first_tb) { 8759fa3e853Sbellard target_ulong virt_addr; 8769fa3e853Sbellard 8779fa3e853Sbellard virt_addr = (tb->pc & TARGET_PAGE_MASK) + (n << TARGET_PAGE_BITS); 8789fa3e853Sbellard tlb_protect_code(cpu_single_env, virt_addr); 8799fa3e853Sbellard } 8809fa3e853Sbellard #endif 881d720b93dSbellard 882d720b93dSbellard #endif /* TARGET_HAS_SMC */ 883fd6ce8f6Sbellard } 884fd6ce8f6Sbellard 885fd6ce8f6Sbellard /* Allocate a new translation block. Flush the translation buffer if 886fd6ce8f6Sbellard too many translation blocks or too much generated code. */ 887d4e8164fSbellard TranslationBlock *tb_alloc(unsigned long pc) 888fd6ce8f6Sbellard { 889fd6ce8f6Sbellard TranslationBlock *tb; 890fd6ce8f6Sbellard 891fd6ce8f6Sbellard if (nb_tbs >= CODE_GEN_MAX_BLOCKS || 892fd6ce8f6Sbellard (code_gen_ptr - code_gen_buffer) >= CODE_GEN_BUFFER_MAX_SIZE) 893d4e8164fSbellard return NULL; 894fd6ce8f6Sbellard tb = &tbs[nb_tbs++]; 895fd6ce8f6Sbellard tb->pc = pc; 896b448f2f3Sbellard tb->cflags = 0; 897d4e8164fSbellard return tb; 898d4e8164fSbellard } 899d4e8164fSbellard 9009fa3e853Sbellard /* add a new TB and link it to the physical page tables. phys_page2 is 9019fa3e853Sbellard (-1) to indicate that only one page contains the TB. */ 9029fa3e853Sbellard void tb_link_phys(TranslationBlock *tb, 9039fa3e853Sbellard target_ulong phys_pc, target_ulong phys_page2) 904d4e8164fSbellard { 9059fa3e853Sbellard unsigned int h; 9069fa3e853Sbellard TranslationBlock **ptb; 9079fa3e853Sbellard 9089fa3e853Sbellard /* add in the physical hash table */ 9099fa3e853Sbellard h = tb_phys_hash_func(phys_pc); 9109fa3e853Sbellard ptb = &tb_phys_hash[h]; 9119fa3e853Sbellard tb->phys_hash_next = *ptb; 9129fa3e853Sbellard *ptb = tb; 913fd6ce8f6Sbellard 914fd6ce8f6Sbellard /* add in the page list */ 9159fa3e853Sbellard tb_alloc_page(tb, 0, phys_pc & TARGET_PAGE_MASK); 9169fa3e853Sbellard if (phys_page2 != -1) 9179fa3e853Sbellard tb_alloc_page(tb, 1, phys_page2); 9189fa3e853Sbellard else 9199fa3e853Sbellard tb->page_addr[1] = -1; 92061382a50Sbellard #ifdef DEBUG_TB_CHECK 92161382a50Sbellard tb_page_check(); 92261382a50Sbellard #endif 9239fa3e853Sbellard } 9249fa3e853Sbellard 9259fa3e853Sbellard /* link the tb with the other TBs */ 9269fa3e853Sbellard void tb_link(TranslationBlock *tb) 9279fa3e853Sbellard { 9289fa3e853Sbellard #if !defined(CONFIG_USER_ONLY) 9299fa3e853Sbellard { 9309fa3e853Sbellard VirtPageDesc *vp; 9319fa3e853Sbellard target_ulong addr; 9329fa3e853Sbellard 9339fa3e853Sbellard /* save the code memory mappings (needed to invalidate the code) */ 9349fa3e853Sbellard addr = tb->pc & TARGET_PAGE_MASK; 9359fa3e853Sbellard vp = virt_page_find_alloc(addr >> TARGET_PAGE_BITS); 93698857888Sbellard #ifdef DEBUG_TLB_CHECK 93798857888Sbellard if (vp->valid_tag == virt_valid_tag && 93898857888Sbellard vp->phys_addr != tb->page_addr[0]) { 93998857888Sbellard printf("Error tb addr=0x%x phys=0x%x vp->phys_addr=0x%x\n", 94098857888Sbellard addr, tb->page_addr[0], vp->phys_addr); 94198857888Sbellard } 94298857888Sbellard #endif 9439fa3e853Sbellard vp->phys_addr = tb->page_addr[0]; 94459817ccbSbellard if (vp->valid_tag != virt_valid_tag) { 9459fa3e853Sbellard vp->valid_tag = virt_valid_tag; 94659817ccbSbellard #if !defined(CONFIG_SOFTMMU) 94759817ccbSbellard vp->prot = 0; 94859817ccbSbellard #endif 94959817ccbSbellard } 9509fa3e853Sbellard 9519fa3e853Sbellard if (tb->page_addr[1] != -1) { 9529fa3e853Sbellard addr += 
TARGET_PAGE_SIZE; 9539fa3e853Sbellard vp = virt_page_find_alloc(addr >> TARGET_PAGE_BITS); 95498857888Sbellard #ifdef DEBUG_TLB_CHECK 95598857888Sbellard if (vp->valid_tag == virt_valid_tag && 95698857888Sbellard vp->phys_addr != tb->page_addr[1]) { 95798857888Sbellard printf("Error tb addr=0x%x phys=0x%x vp->phys_addr=0x%x\n", 95898857888Sbellard addr, tb->page_addr[1], vp->phys_addr); 95998857888Sbellard } 96098857888Sbellard #endif 9619fa3e853Sbellard vp->phys_addr = tb->page_addr[1]; 96259817ccbSbellard if (vp->valid_tag != virt_valid_tag) { 9639fa3e853Sbellard vp->valid_tag = virt_valid_tag; 96459817ccbSbellard #if !defined(CONFIG_SOFTMMU) 96559817ccbSbellard vp->prot = 0; 96659817ccbSbellard #endif 96759817ccbSbellard } 9689fa3e853Sbellard } 9699fa3e853Sbellard } 9709fa3e853Sbellard #endif 9719fa3e853Sbellard 972d4e8164fSbellard tb->jmp_first = (TranslationBlock *)((long)tb | 2); 973d4e8164fSbellard tb->jmp_next[0] = NULL; 974d4e8164fSbellard tb->jmp_next[1] = NULL; 975b448f2f3Sbellard #ifdef USE_CODE_COPY 976b448f2f3Sbellard tb->cflags &= ~CF_FP_USED; 977b448f2f3Sbellard if (tb->cflags & CF_TB_FP_USED) 978b448f2f3Sbellard tb->cflags |= CF_FP_USED; 979b448f2f3Sbellard #endif 980d4e8164fSbellard 981d4e8164fSbellard /* init original jump addresses */ 982d4e8164fSbellard if (tb->tb_next_offset[0] != 0xffff) 983d4e8164fSbellard tb_reset_jump(tb, 0); 984d4e8164fSbellard if (tb->tb_next_offset[1] != 0xffff) 985d4e8164fSbellard tb_reset_jump(tb, 1); 986fd6ce8f6Sbellard } 987fd6ce8f6Sbellard 988a513fe19Sbellard /* find the TB 'tb' such that tb[0].tc_ptr <= tc_ptr < 989a513fe19Sbellard tb[1].tc_ptr. Return NULL if not found */ 990a513fe19Sbellard TranslationBlock *tb_find_pc(unsigned long tc_ptr) 991a513fe19Sbellard { 992a513fe19Sbellard int m_min, m_max, m; 993a513fe19Sbellard unsigned long v; 994a513fe19Sbellard TranslationBlock *tb; 995a513fe19Sbellard 996a513fe19Sbellard if (nb_tbs <= 0) 997a513fe19Sbellard return NULL; 998a513fe19Sbellard if (tc_ptr < (unsigned long)code_gen_buffer || 999a513fe19Sbellard tc_ptr >= (unsigned long)code_gen_ptr) 1000a513fe19Sbellard return NULL; 1001a513fe19Sbellard /* binary search (cf Knuth) */ 1002a513fe19Sbellard m_min = 0; 1003a513fe19Sbellard m_max = nb_tbs - 1; 1004a513fe19Sbellard while (m_min <= m_max) { 1005a513fe19Sbellard m = (m_min + m_max) >> 1; 1006a513fe19Sbellard tb = &tbs[m]; 1007a513fe19Sbellard v = (unsigned long)tb->tc_ptr; 1008a513fe19Sbellard if (v == tc_ptr) 1009a513fe19Sbellard return tb; 1010a513fe19Sbellard else if (tc_ptr < v) { 1011a513fe19Sbellard m_max = m - 1; 1012a513fe19Sbellard } else { 1013a513fe19Sbellard m_min = m + 1; 1014a513fe19Sbellard } 1015a513fe19Sbellard } 1016a513fe19Sbellard return &tbs[m_max]; 1017a513fe19Sbellard } 10187501267eSbellard 1019ea041c0eSbellard static void tb_reset_jump_recursive(TranslationBlock *tb); 1020ea041c0eSbellard 1021ea041c0eSbellard static inline void tb_reset_jump_recursive2(TranslationBlock *tb, int n) 1022ea041c0eSbellard { 1023ea041c0eSbellard TranslationBlock *tb1, *tb_next, **ptb; 1024ea041c0eSbellard unsigned int n1; 1025ea041c0eSbellard 1026ea041c0eSbellard tb1 = tb->jmp_next[n]; 1027ea041c0eSbellard if (tb1 != NULL) { 1028ea041c0eSbellard /* find head of list */ 1029ea041c0eSbellard for(;;) { 1030ea041c0eSbellard n1 = (long)tb1 & 3; 1031ea041c0eSbellard tb1 = (TranslationBlock *)((long)tb1 & ~3); 1032ea041c0eSbellard if (n1 == 2) 1033ea041c0eSbellard break; 1034ea041c0eSbellard tb1 = tb1->jmp_next[n1]; 1035ea041c0eSbellard } 1036ea041c0eSbellard /* we are now sure now that tb 
jumps to tb1 */ 1037ea041c0eSbellard tb_next = tb1; 1038ea041c0eSbellard 1039ea041c0eSbellard /* remove tb from the jmp_first list */ 1040ea041c0eSbellard ptb = &tb_next->jmp_first; 1041ea041c0eSbellard for(;;) { 1042ea041c0eSbellard tb1 = *ptb; 1043ea041c0eSbellard n1 = (long)tb1 & 3; 1044ea041c0eSbellard tb1 = (TranslationBlock *)((long)tb1 & ~3); 1045ea041c0eSbellard if (n1 == n && tb1 == tb) 1046ea041c0eSbellard break; 1047ea041c0eSbellard ptb = &tb1->jmp_next[n1]; 1048ea041c0eSbellard } 1049ea041c0eSbellard *ptb = tb->jmp_next[n]; 1050ea041c0eSbellard tb->jmp_next[n] = NULL; 1051ea041c0eSbellard 1052ea041c0eSbellard /* suppress the jump to next tb in generated code */ 1053ea041c0eSbellard tb_reset_jump(tb, n); 1054ea041c0eSbellard 10550124311eSbellard /* suppress jumps in the tb on which we could have jumped */ 1056ea041c0eSbellard tb_reset_jump_recursive(tb_next); 1057ea041c0eSbellard } 1058ea041c0eSbellard } 1059ea041c0eSbellard 1060ea041c0eSbellard static void tb_reset_jump_recursive(TranslationBlock *tb) 1061ea041c0eSbellard { 1062ea041c0eSbellard tb_reset_jump_recursive2(tb, 0); 1063ea041c0eSbellard tb_reset_jump_recursive2(tb, 1); 1064ea041c0eSbellard } 1065ea041c0eSbellard 1066d720b93dSbellard static void breakpoint_invalidate(CPUState *env, target_ulong pc) 1067d720b93dSbellard { 1068d720b93dSbellard target_ulong phys_addr; 1069d720b93dSbellard 1070d720b93dSbellard phys_addr = cpu_get_phys_page_debug(env, pc); 1071d720b93dSbellard tb_invalidate_phys_page_range(phys_addr, phys_addr + 1, 0); 1072d720b93dSbellard } 1073d720b93dSbellard 1074c33a346eSbellard /* add a breakpoint. EXCP_DEBUG is returned by the CPU loop if a 1075c33a346eSbellard breakpoint is reached */ 10762e12669aSbellard int cpu_breakpoint_insert(CPUState *env, target_ulong pc) 10774c3a88a2Sbellard { 1078e95c8d51Sbellard #if defined(TARGET_I386) || defined(TARGET_PPC) || defined(TARGET_SPARC) 10794c3a88a2Sbellard int i; 10804c3a88a2Sbellard 10814c3a88a2Sbellard for(i = 0; i < env->nb_breakpoints; i++) { 10824c3a88a2Sbellard if (env->breakpoints[i] == pc) 10834c3a88a2Sbellard return 0; 10844c3a88a2Sbellard } 10854c3a88a2Sbellard 10864c3a88a2Sbellard if (env->nb_breakpoints >= MAX_BREAKPOINTS) 10874c3a88a2Sbellard return -1; 10884c3a88a2Sbellard env->breakpoints[env->nb_breakpoints++] = pc; 1089d720b93dSbellard 1090d720b93dSbellard breakpoint_invalidate(env, pc); 10914c3a88a2Sbellard return 0; 10924c3a88a2Sbellard #else 10934c3a88a2Sbellard return -1; 10944c3a88a2Sbellard #endif 10954c3a88a2Sbellard } 10964c3a88a2Sbellard 10974c3a88a2Sbellard /* remove a breakpoint */ 10982e12669aSbellard int cpu_breakpoint_remove(CPUState *env, target_ulong pc) 10994c3a88a2Sbellard { 1100e95c8d51Sbellard #if defined(TARGET_I386) || defined(TARGET_PPC) || defined(TARGET_SPARC) 11014c3a88a2Sbellard int i; 11024c3a88a2Sbellard for(i = 0; i < env->nb_breakpoints; i++) { 11034c3a88a2Sbellard if (env->breakpoints[i] == pc) 11044c3a88a2Sbellard goto found; 11054c3a88a2Sbellard } 11064c3a88a2Sbellard return -1; 11074c3a88a2Sbellard found: 11084c3a88a2Sbellard memmove(&env->breakpoints[i], &env->breakpoints[i + 1], 11094c3a88a2Sbellard (env->nb_breakpoints - (i + 1)) * sizeof(env->breakpoints[0])); 11104c3a88a2Sbellard env->nb_breakpoints--; 1111d720b93dSbellard 1112d720b93dSbellard breakpoint_invalidate(env, pc); 11134c3a88a2Sbellard return 0; 11144c3a88a2Sbellard #else 11154c3a88a2Sbellard return -1; 11164c3a88a2Sbellard #endif 11174c3a88a2Sbellard } 11184c3a88a2Sbellard 1119c33a346eSbellard /* enable or disable single step mode. 
EXCP_DEBUG is returned by the
1120c33a346eSbellard CPU loop after each instruction */
1121c33a346eSbellard void cpu_single_step(CPUState *env, int enabled)
1122c33a346eSbellard {
1123e95c8d51Sbellard #if defined(TARGET_I386) || defined(TARGET_PPC) || defined(TARGET_SPARC)
1124c33a346eSbellard if (env->singlestep_enabled != enabled) {
1125c33a346eSbellard env->singlestep_enabled = enabled;
1126c33a346eSbellard /* must flush all the translated code to avoid inconsistencies */
11279fa3e853Sbellard /* XXX: only flush what is necessary */
11280124311eSbellard tb_flush(env);
1129c33a346eSbellard }
1130c33a346eSbellard #endif
1131c33a346eSbellard }
1132c33a346eSbellard
113334865134Sbellard /* enable or disable low level logging */
113434865134Sbellard void cpu_set_log(int log_flags)
113534865134Sbellard {
113634865134Sbellard loglevel = log_flags;
113734865134Sbellard if (loglevel && !logfile) {
113834865134Sbellard logfile = fopen(logfilename, "w");
113934865134Sbellard if (!logfile) {
114034865134Sbellard perror(logfilename);
114134865134Sbellard _exit(1);
114234865134Sbellard }
11439fa3e853Sbellard #if !defined(CONFIG_SOFTMMU)
11449fa3e853Sbellard /* must avoid mmap() usage of glibc by setting a buffer "by hand" */
11459fa3e853Sbellard {
11469fa3e853Sbellard static uint8_t logfile_buf[4096];
11479fa3e853Sbellard setvbuf(logfile, logfile_buf, _IOLBF, sizeof(logfile_buf));
11489fa3e853Sbellard }
11499fa3e853Sbellard #else
115034865134Sbellard setvbuf(logfile, NULL, _IOLBF, 0);
11519fa3e853Sbellard #endif
115234865134Sbellard }
115334865134Sbellard }
115434865134Sbellard
115534865134Sbellard void cpu_set_log_filename(const char *filename)
115634865134Sbellard {
115734865134Sbellard logfilename = strdup(filename);
115834865134Sbellard }
1159c33a346eSbellard
11600124311eSbellard /* mask must never be zero, except for A20 change call */
116168a79315Sbellard void cpu_interrupt(CPUState *env, int mask)
1162ea041c0eSbellard {
1163ea041c0eSbellard TranslationBlock *tb;
1164ee8b7021Sbellard static int interrupt_lock;
1165ea041c0eSbellard
116668a79315Sbellard env->interrupt_request |= mask;
1167ea041c0eSbellard /* if the cpu is currently executing code, we must unlink it and
1168ea041c0eSbellard all the potentially executing TB */
1169ea041c0eSbellard tb = env->current_tb;
1170ee8b7021Sbellard if (tb && !testandset(&interrupt_lock)) {
1171ee8b7021Sbellard env->current_tb = NULL;
1172ea041c0eSbellard tb_reset_jump_recursive(tb);
1173ee8b7021Sbellard interrupt_lock = 0;
1174ea041c0eSbellard }
1175ea041c0eSbellard }
1176ea041c0eSbellard
1177b54ad049Sbellard void cpu_reset_interrupt(CPUState *env, int mask)
1178b54ad049Sbellard {
1179b54ad049Sbellard env->interrupt_request &= ~mask;
1180b54ad049Sbellard }
1181b54ad049Sbellard
1182f193c797Sbellard CPULogItem cpu_log_items[] = {
1183f193c797Sbellard { CPU_LOG_TB_OUT_ASM, "out_asm",
1184f193c797Sbellard "show generated host assembly code for each compiled TB" },
1185f193c797Sbellard { CPU_LOG_TB_IN_ASM, "in_asm",
1186f193c797Sbellard "show target assembly code for each compiled TB" },
1187f193c797Sbellard { CPU_LOG_TB_OP, "op",
1188f193c797Sbellard "show micro ops for each compiled TB (only usable if 'in_asm' used)" },
1189f193c797Sbellard #ifdef TARGET_I386
1190f193c797Sbellard { CPU_LOG_TB_OP_OPT, "op_opt",
1191f193c797Sbellard "show micro ops after optimization for each compiled TB" },
1192f193c797Sbellard #endif
1193f193c797Sbellard { CPU_LOG_INT, "int",
1194f193c797Sbellard "show interrupts/exceptions in short format" },
1195f193c797Sbellard { CPU_LOG_EXEC, "exec",
1196f193c797Sbellard "show trace before each executed TB (lots of logs)" },
11979fddaa0cSbellard { CPU_LOG_TB_CPU, "cpu",
11989fddaa0cSbellard "show CPU state before block translation" },
1199f193c797Sbellard #ifdef TARGET_I386
1200f193c797Sbellard { CPU_LOG_PCALL, "pcall",
1201f193c797Sbellard "show protected mode far calls/returns/exceptions" },
1202f193c797Sbellard #endif
12038e3a9fd2Sbellard #ifdef DEBUG_IOPORT
1204fd872598Sbellard { CPU_LOG_IOPORT, "ioport",
1205fd872598Sbellard "show all i/o port accesses" },
12068e3a9fd2Sbellard #endif
1207f193c797Sbellard { 0, NULL, NULL },
1208f193c797Sbellard };
1209f193c797Sbellard
1210f193c797Sbellard static int cmp1(const char *s1, int n, const char *s2)
1211f193c797Sbellard {
1212f193c797Sbellard if (strlen(s2) != n)
1213f193c797Sbellard return 0;
1214f193c797Sbellard return memcmp(s1, s2, n) == 0;
1215f193c797Sbellard }
1216f193c797Sbellard
1217f193c797Sbellard /* takes a comma separated list of log masks. Return 0 if error. */
1218f193c797Sbellard int cpu_str_to_log_mask(const char *str)
1219f193c797Sbellard {
1220f193c797Sbellard CPULogItem *item;
1221f193c797Sbellard int mask;
1222f193c797Sbellard const char *p, *p1;
1223f193c797Sbellard
1224f193c797Sbellard p = str;
1225f193c797Sbellard mask = 0;
1226f193c797Sbellard for(;;) {
1227f193c797Sbellard p1 = strchr(p, ',');
1228f193c797Sbellard if (!p1)
1229f193c797Sbellard p1 = p + strlen(p);
12308e3a9fd2Sbellard if(cmp1(p,p1-p,"all")) {
12318e3a9fd2Sbellard for(item = cpu_log_items; item->mask != 0; item++) {
12328e3a9fd2Sbellard mask |= item->mask;
12338e3a9fd2Sbellard }
12348e3a9fd2Sbellard } else {
1235f193c797Sbellard for(item = cpu_log_items; item->mask != 0; item++) {
1236f193c797Sbellard if (cmp1(p, p1 - p, item->name))
1237f193c797Sbellard goto found;
1238f193c797Sbellard }
1239f193c797Sbellard return 0;
12408e3a9fd2Sbellard }
1241f193c797Sbellard found:
1242f193c797Sbellard mask |= item->mask;
1243f193c797Sbellard if (*p1 != ',')
1244f193c797Sbellard break;
1245f193c797Sbellard p = p1 + 1;
1246f193c797Sbellard }
1247f193c797Sbellard return mask;
1248f193c797Sbellard }
1249ea041c0eSbellard
12507501267eSbellard void cpu_abort(CPUState *env, const char *fmt, ...)
12517501267eSbellard { 12527501267eSbellard va_list ap; 12537501267eSbellard 12547501267eSbellard va_start(ap, fmt); 12557501267eSbellard fprintf(stderr, "qemu: fatal: "); 12567501267eSbellard vfprintf(stderr, fmt, ap); 12577501267eSbellard fprintf(stderr, "\n"); 12587501267eSbellard #ifdef TARGET_I386 12597fe48483Sbellard cpu_dump_state(env, stderr, fprintf, X86_DUMP_FPU | X86_DUMP_CCOP); 12607fe48483Sbellard #else 12617fe48483Sbellard cpu_dump_state(env, stderr, fprintf, 0); 12627501267eSbellard #endif 12637501267eSbellard va_end(ap); 12647501267eSbellard abort(); 12657501267eSbellard } 12667501267eSbellard 12670124311eSbellard #if !defined(CONFIG_USER_ONLY) 12680124311eSbellard 1269ee8b7021Sbellard /* NOTE: if flush_global is true, also flush global entries (not 1270ee8b7021Sbellard implemented yet) */ 1271ee8b7021Sbellard void tlb_flush(CPUState *env, int flush_global) 127233417e70Sbellard { 127333417e70Sbellard int i; 12740124311eSbellard 12759fa3e853Sbellard #if defined(DEBUG_TLB) 12769fa3e853Sbellard printf("tlb_flush:\n"); 12779fa3e853Sbellard #endif 12780124311eSbellard /* must reset current TB so that interrupts cannot modify the 12790124311eSbellard links while we are modifying them */ 12800124311eSbellard env->current_tb = NULL; 12810124311eSbellard 128233417e70Sbellard for(i = 0; i < CPU_TLB_SIZE; i++) { 128333417e70Sbellard env->tlb_read[0][i].address = -1; 128433417e70Sbellard env->tlb_write[0][i].address = -1; 128533417e70Sbellard env->tlb_read[1][i].address = -1; 128633417e70Sbellard env->tlb_write[1][i].address = -1; 128733417e70Sbellard } 12889fa3e853Sbellard 12899fa3e853Sbellard virt_page_flush(); 12908a8a608fSbellard memset (tb_hash, 0, CODE_GEN_HASH_SIZE * sizeof (void *)); 12919fa3e853Sbellard 12929fa3e853Sbellard #if !defined(CONFIG_SOFTMMU) 12939fa3e853Sbellard munmap((void *)MMAP_AREA_START, MMAP_AREA_END - MMAP_AREA_START); 12949fa3e853Sbellard #endif 129533417e70Sbellard } 129633417e70Sbellard 1297274da6b2Sbellard static inline void tlb_flush_entry(CPUTLBEntry *tlb_entry, target_ulong addr) 129861382a50Sbellard { 129961382a50Sbellard if (addr == (tlb_entry->address & 130061382a50Sbellard (TARGET_PAGE_MASK | TLB_INVALID_MASK))) 130161382a50Sbellard tlb_entry->address = -1; 130261382a50Sbellard } 130361382a50Sbellard 13042e12669aSbellard void tlb_flush_page(CPUState *env, target_ulong addr) 130533417e70Sbellard { 13069fa3e853Sbellard int i, n; 13079fa3e853Sbellard VirtPageDesc *vp; 13089fa3e853Sbellard PageDesc *p; 13099fa3e853Sbellard TranslationBlock *tb; 13100124311eSbellard 13119fa3e853Sbellard #if defined(DEBUG_TLB) 13129fa3e853Sbellard printf("tlb_flush_page: 0x%08x\n", addr); 13139fa3e853Sbellard #endif 13140124311eSbellard /* must reset current TB so that interrupts cannot modify the 13150124311eSbellard links while we are modifying them */ 13160124311eSbellard env->current_tb = NULL; 131733417e70Sbellard 131861382a50Sbellard addr &= TARGET_PAGE_MASK; 131933417e70Sbellard i = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1); 132061382a50Sbellard tlb_flush_entry(&env->tlb_read[0][i], addr); 132161382a50Sbellard tlb_flush_entry(&env->tlb_write[0][i], addr); 132261382a50Sbellard tlb_flush_entry(&env->tlb_read[1][i], addr); 132361382a50Sbellard tlb_flush_entry(&env->tlb_write[1][i], addr); 13240124311eSbellard 13259fa3e853Sbellard /* remove from the virtual pc hash table all the TB at this 13269fa3e853Sbellard virtual address */ 13279fa3e853Sbellard 13289fa3e853Sbellard vp = virt_page_find(addr >> TARGET_PAGE_BITS); 13299fa3e853Sbellard if (vp && vp->valid_tag 
void tlb_flush_page(CPUState *env, target_ulong addr)
{
    int i, n;
    VirtPageDesc *vp;
    PageDesc *p;
    TranslationBlock *tb;

#if defined(DEBUG_TLB)
    printf("tlb_flush_page: 0x%08x\n", addr);
#endif
    /* must reset current TB so that interrupts cannot modify the
       links while we are modifying them */
    env->current_tb = NULL;

    addr &= TARGET_PAGE_MASK;
    i = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
    tlb_flush_entry(&env->tlb_read[0][i], addr);
    tlb_flush_entry(&env->tlb_write[0][i], addr);
    tlb_flush_entry(&env->tlb_read[1][i], addr);
    tlb_flush_entry(&env->tlb_write[1][i], addr);

    /* remove from the virtual pc hash table all the TBs at this
       virtual address */
    vp = virt_page_find(addr >> TARGET_PAGE_BITS);
    if (vp && vp->valid_tag == virt_valid_tag) {
        p = page_find(vp->phys_addr >> TARGET_PAGE_BITS);
        if (p) {
            /* we remove all the links to the TBs in this virtual page */
            tb = p->first_tb;
            while (tb != NULL) {
                n = (long)tb & 3;
                tb = (TranslationBlock *)((long)tb & ~3);
                if ((tb->pc & TARGET_PAGE_MASK) == addr ||
                    ((tb->pc + tb->size - 1) & TARGET_PAGE_MASK) == addr) {
                    tb_invalidate(tb);
                }
                tb = tb->page_next[n];
            }
        }
        vp->valid_tag = 0;
    }

#if !defined(CONFIG_SOFTMMU)
    if (addr < MMAP_AREA_END)
        munmap((void *)addr, TARGET_PAGE_SIZE);
#endif
}

static inline void tlb_protect_code1(CPUTLBEntry *tlb_entry, target_ulong addr)
{
    if (addr == (tlb_entry->address &
                 (TARGET_PAGE_MASK | TLB_INVALID_MASK)) &&
        (tlb_entry->address & ~TARGET_PAGE_MASK) != IO_MEM_CODE &&
        (tlb_entry->address & ~TARGET_PAGE_MASK) != IO_MEM_ROM) {
        tlb_entry->address = (tlb_entry->address & TARGET_PAGE_MASK) | IO_MEM_CODE;
    }
}

/* update the TLBs so that writes to code in the virtual page 'addr'
   can be detected */
static void tlb_protect_code(CPUState *env, target_ulong addr)
{
    int i;

    addr &= TARGET_PAGE_MASK;
    i = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
    tlb_protect_code1(&env->tlb_write[0][i], addr);
    tlb_protect_code1(&env->tlb_write[1][i], addr);
#if !defined(CONFIG_SOFTMMU)
    /* NOTE: as we generated the code for this page, it is already at
       least readable */
    if (addr < MMAP_AREA_END)
        mprotect((void *)addr, TARGET_PAGE_SIZE, PROT_READ);
#endif
}
static inline void tlb_unprotect_code2(CPUTLBEntry *tlb_entry,
                                       unsigned long phys_addr)
{
    if ((tlb_entry->address & ~TARGET_PAGE_MASK) == IO_MEM_CODE &&
        ((tlb_entry->address & TARGET_PAGE_MASK) + tlb_entry->addend) == phys_addr) {
        tlb_entry->address = (tlb_entry->address & TARGET_PAGE_MASK) | IO_MEM_NOTDIRTY;
    }
}

/* update the TLB so that writes in physical page 'phys_addr' are no longer
   tested for self modifying code */
static void tlb_unprotect_code_phys(CPUState *env, unsigned long phys_addr, target_ulong vaddr)
{
    int i;

    phys_addr &= TARGET_PAGE_MASK;
    phys_addr += (long)phys_ram_base;
    i = (vaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
    tlb_unprotect_code2(&env->tlb_write[0][i], phys_addr);
    tlb_unprotect_code2(&env->tlb_write[1][i], phys_addr);
}

static inline void tlb_reset_dirty_range(CPUTLBEntry *tlb_entry,
                                         unsigned long start, unsigned long length)
{
    unsigned long addr;
    if ((tlb_entry->address & ~TARGET_PAGE_MASK) == IO_MEM_RAM) {
        addr = (tlb_entry->address & TARGET_PAGE_MASK) + tlb_entry->addend;
        if ((addr - start) < length) {
            tlb_entry->address = (tlb_entry->address & TARGET_PAGE_MASK) | IO_MEM_NOTDIRTY;
        }
    }
}

void cpu_physical_memory_reset_dirty(target_ulong start, target_ulong end)
{
    CPUState *env;
    unsigned long length, start1;
    int i;

    start &= TARGET_PAGE_MASK;
    end = TARGET_PAGE_ALIGN(end);

    length = end - start;
    if (length == 0)
        return;
    memset(phys_ram_dirty + (start >> TARGET_PAGE_BITS), 0, length >> TARGET_PAGE_BITS);

    env = cpu_single_env;
    /* we modify the TLB cache so that the dirty bit will be set again
       when accessing the range */
    start1 = start + (unsigned long)phys_ram_base;
    for(i = 0; i < CPU_TLB_SIZE; i++)
        tlb_reset_dirty_range(&env->tlb_write[0][i], start1, length);
    for(i = 0; i < CPU_TLB_SIZE; i++)
        tlb_reset_dirty_range(&env->tlb_write[1][i], start1, length);

#if !defined(CONFIG_SOFTMMU)
    /* XXX: this is expensive */
    {
        VirtPageDesc *p;
        int j;
        target_ulong addr;

        for(i = 0; i < L1_SIZE; i++) {
            p = l1_virt_map[i];
            if (p) {
                addr = i << (TARGET_PAGE_BITS + L2_BITS);
                for(j = 0; j < L2_SIZE; j++) {
                    if (p->valid_tag == virt_valid_tag &&
                        p->phys_addr >= start && p->phys_addr < end &&
                        (p->prot & PROT_WRITE)) {
                        if (addr < MMAP_AREA_END) {
                            mprotect((void *)addr, TARGET_PAGE_SIZE,
                                     p->prot & ~PROT_WRITE);
                        }
                    }
                    addr += TARGET_PAGE_SIZE;
                    p++;
                }
            }
        }
    }
#endif
}
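/* Illustrative sketch (not part of this file): the intended consumer pattern
   for the dirty bitmap.  A device that shadows guest RAM (a display emulation,
   for instance) scans phys_ram_dirty for pages written since its last pass,
   then calls cpu_physical_memory_reset_dirty() so that further writes are
   tracked again.  The callback and helper names are hypothetical. */
#if 0 /* example only */
static void example_scan_dirty_ram(target_ulong start, target_ulong end,
                                   void (*update_page)(target_ulong ram_offset))
{
    target_ulong offset;

    for (offset = start; offset < end; offset += TARGET_PAGE_SIZE) {
        if (phys_ram_dirty[offset >> TARGET_PAGE_BITS]) {
            /* the guest touched this RAM page since the last scan */
            update_page(offset);
        }
    }
    /* clear the bits and re-arm the IO_MEM_NOTDIRTY write handlers */
    cpu_physical_memory_reset_dirty(start, end);
}
#endif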
static inline void tlb_set_dirty1(CPUTLBEntry *tlb_entry,
                                  unsigned long start)
{
    unsigned long addr;
    if ((tlb_entry->address & ~TARGET_PAGE_MASK) == IO_MEM_NOTDIRTY) {
        addr = (tlb_entry->address & TARGET_PAGE_MASK) + tlb_entry->addend;
        if (addr == start) {
            tlb_entry->address = (tlb_entry->address & TARGET_PAGE_MASK) | IO_MEM_RAM;
        }
    }
}

/* update the TLB corresponding to virtual page vaddr and phys addr
   addr so that it is no longer dirty */
static inline void tlb_set_dirty(unsigned long addr, target_ulong vaddr)
{
    CPUState *env = cpu_single_env;
    int i;

    phys_ram_dirty[(addr - (unsigned long)phys_ram_base) >> TARGET_PAGE_BITS] = 1;

    addr &= TARGET_PAGE_MASK;
    i = (vaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
    tlb_set_dirty1(&env->tlb_write[0][i], addr);
    tlb_set_dirty1(&env->tlb_write[1][i], addr);
}

/* add a new TLB entry. At most one entry for a given virtual address
   is permitted. Return 0 if OK or 2 if the page could not be mapped
   (can only happen in non SOFTMMU mode for I/O pages or pages
   conflicting with the host address space). */
int tlb_set_page(CPUState *env, target_ulong vaddr,
                 target_phys_addr_t paddr, int prot,
                 int is_user, int is_softmmu)
{
    PhysPageDesc *p;
    unsigned long pd;
    TranslationBlock *first_tb;
    unsigned int index;
    target_ulong address;
    unsigned long addend;
    int ret;

    p = phys_page_find(paddr >> TARGET_PAGE_BITS);
    first_tb = NULL;
    if (!p) {
        pd = IO_MEM_UNASSIGNED;
    } else {
        PageDesc *p1;
        pd = p->phys_offset;
        if ((pd & ~TARGET_PAGE_MASK) <= IO_MEM_ROM) {
            /* NOTE: we also allocate the page at this stage */
            p1 = page_find_alloc(pd >> TARGET_PAGE_BITS);
            first_tb = p1->first_tb;
        }
    }
#if defined(DEBUG_TLB)
    printf("tlb_set_page: vaddr=0x%08x paddr=0x%08x prot=%x u=%d c=%d smmu=%d pd=0x%08x\n",
           vaddr, paddr, prot, is_user, (first_tb != NULL), is_softmmu, pd);
#endif

    ret = 0;
#if !defined(CONFIG_SOFTMMU)
    if (is_softmmu)
#endif
    {
        if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM) {
            /* IO memory case */
            address = vaddr | pd;
            addend = paddr;
        } else {
            /* standard memory */
            address = vaddr;
            addend = (unsigned long)phys_ram_base + (pd & TARGET_PAGE_MASK);
        }

        index = (vaddr >> 12) & (CPU_TLB_SIZE - 1);
        addend -= vaddr;
        if (prot & PAGE_READ) {
            env->tlb_read[is_user][index].address = address;
            env->tlb_read[is_user][index].addend = addend;
        } else {
            env->tlb_read[is_user][index].address = -1;
            env->tlb_read[is_user][index].addend = -1;
        }
        if (prot & PAGE_WRITE) {
            if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_ROM) {
                /* ROM: access is ignored (same as unassigned) */
                env->tlb_write[is_user][index].address = vaddr | IO_MEM_ROM;
                env->tlb_write[is_user][index].addend = addend;
            } else
                /* XXX: the PowerPC code seems not ready to handle
                   self modifying code with DCBI */
#if defined(TARGET_HAS_SMC) || 1
            if (first_tb) {
                /* if code is present, we use a specific memory
                   handler. It works only for physical memory access */
                env->tlb_write[is_user][index].address = vaddr | IO_MEM_CODE;
                env->tlb_write[is_user][index].addend = addend;
            } else
#endif
            if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_RAM &&
                !cpu_physical_memory_is_dirty(pd)) {
                env->tlb_write[is_user][index].address = vaddr | IO_MEM_NOTDIRTY;
                env->tlb_write[is_user][index].addend = addend;
            } else {
                env->tlb_write[is_user][index].address = address;
                env->tlb_write[is_user][index].addend = addend;
            }
        } else {
            env->tlb_write[is_user][index].address = -1;
            env->tlb_write[is_user][index].addend = -1;
        }
    }
#if !defined(CONFIG_SOFTMMU)
    else {
        if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM) {
            /* IO access: no mapping is done as it will be handled by the
               soft MMU */
            if (!(env->hflags & HF_SOFTMMU_MASK))
                ret = 2;
        } else {
            void *map_addr;

            if (vaddr >= MMAP_AREA_END) {
                ret = 2;
            } else {
                if (prot & PROT_WRITE) {
                    if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_ROM ||
#if defined(TARGET_HAS_SMC) || 1
                        first_tb ||
#endif
                        ((pd & ~TARGET_PAGE_MASK) == IO_MEM_RAM &&
                         !cpu_physical_memory_is_dirty(pd))) {
                        /* ROM: we behave as if code was inside */
                        /* if code is present, we only map as read only and save the
                           original mapping */
                        VirtPageDesc *vp;

                        vp = virt_page_find_alloc(vaddr >> TARGET_PAGE_BITS);
                        vp->phys_addr = pd;
                        vp->prot = prot;
                        vp->valid_tag = virt_valid_tag;
                        prot &= ~PAGE_WRITE;
                    }
                }
                map_addr = mmap((void *)vaddr, TARGET_PAGE_SIZE, prot,
                                MAP_SHARED | MAP_FIXED, phys_ram_fd, (pd & TARGET_PAGE_MASK));
                if (map_addr == MAP_FAILED) {
                    cpu_abort(env, "mmap failed when mapping physical address 0x%08x to virtual address 0x%08x\n",
                              paddr, vaddr);
                }
            }
        }
    }
#endif
    return ret;
}
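/* Illustrative sketch (not part of this file): what a caller of tlb_set_page()
   is expected to look like.  In practice each target's MMU emulation resolves
   vaddr to (paddr, prot) by walking its page tables and then fills the TLB;
   the page walk helper below is hypothetical. */
#if 0 /* example only */
static int example_handle_tlb_miss(CPUState *env, target_ulong vaddr,
                                   int is_user, int is_softmmu)
{
    target_phys_addr_t paddr;
    int prot;

    /* hypothetical target-specific page table walk */
    if (example_target_page_walk(env, vaddr, &paddr, &prot) < 0)
        return -1; /* no mapping: the caller raises a guest page fault */

    /* install the translation; a return value of 2 means the page could not
       be host-mapped and must go through the soft MMU instead */
    return tlb_set_page(env, vaddr & TARGET_PAGE_MASK, paddr, prot,
                        is_user, is_softmmu);
}
#endif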
/* called from signal handler: invalidate the code and unprotect the
   page. Return TRUE if the fault was successfully handled. */
int page_unprotect(unsigned long addr, unsigned long pc, void *puc)
{
#if !defined(CONFIG_SOFTMMU)
    VirtPageDesc *vp;

#if defined(DEBUG_TLB)
    printf("page_unprotect: addr=0x%08x\n", addr);
#endif
    addr &= TARGET_PAGE_MASK;

    /* if it is not mapped, no need to worry here */
    if (addr >= MMAP_AREA_END)
        return 0;
    vp = virt_page_find(addr >> TARGET_PAGE_BITS);
    if (!vp)
        return 0;
    /* NOTE: in this case, validate_tag is _not_ tested as it
       validates only the code TLB */
    if (vp->valid_tag != virt_valid_tag)
        return 0;
    if (!(vp->prot & PAGE_WRITE))
        return 0;
#if defined(DEBUG_TLB)
    printf("page_unprotect: addr=0x%08x phys_addr=0x%08x prot=%x\n",
           addr, vp->phys_addr, vp->prot);
#endif
    if (mprotect((void *)addr, TARGET_PAGE_SIZE, vp->prot) < 0)
        cpu_abort(cpu_single_env, "error mprotect addr=0x%lx prot=%d\n",
                  (unsigned long)addr, vp->prot);
    /* set the dirty bit */
    phys_ram_dirty[vp->phys_addr >> TARGET_PAGE_BITS] = 1;
    /* flush the code inside */
    tb_invalidate_phys_page(vp->phys_addr, pc, puc);
    return 1;
#else
    return 0;
#endif
}

#else

void tlb_flush(CPUState *env, int flush_global)
{
}

void tlb_flush_page(CPUState *env, target_ulong addr)
{
}

int tlb_set_page(CPUState *env, target_ulong vaddr,
                 target_phys_addr_t paddr, int prot,
                 int is_user, int is_softmmu)
{
    return 0;
}

/* dump memory mappings */
void page_dump(FILE *f)
{
    unsigned long start, end;
    int i, j, prot, prot1;
    PageDesc *p;

    fprintf(f, "%-8s %-8s %-8s %s\n",
            "start", "end", "size", "prot");
    start = -1;
    end = -1;
    prot = 0;
    for(i = 0; i <= L1_SIZE; i++) {
        if (i < L1_SIZE)
            p = l1_map[i];
        else
            p = NULL;
        for(j = 0; j < L2_SIZE; j++) {
            if (!p)
                prot1 = 0;
            else
                prot1 = p[j].flags;
            if (prot1 != prot) {
                end = (i << (32 - L1_BITS)) | (j << TARGET_PAGE_BITS);
                if (start != -1) {
                    fprintf(f, "%08lx-%08lx %08lx %c%c%c\n",
                            start, end, end - start,
                            prot & PAGE_READ ? 'r' : '-',
                            prot & PAGE_WRITE ? 'w' : '-',
                            prot & PAGE_EXEC ? 'x' : '-');
                }
                if (prot1 != 0)
                    start = end;
                else
                    start = -1;
                prot = prot1;
            }
            if (!p)
                break;
        }
    }
}

int page_get_flags(unsigned long address)
{
    PageDesc *p;

    p = page_find(address >> TARGET_PAGE_BITS);
    if (!p)
        return 0;
    return p->flags;
}

/* modify the flags of a page and invalidate the code if
   necessary. The flag PAGE_WRITE_ORG is set automatically
   depending on PAGE_WRITE */
void page_set_flags(unsigned long start, unsigned long end, int flags)
{
    PageDesc *p;
    unsigned long addr;

    start = start & TARGET_PAGE_MASK;
    end = TARGET_PAGE_ALIGN(end);
    if (flags & PAGE_WRITE)
        flags |= PAGE_WRITE_ORG;
    spin_lock(&tb_lock);
    for(addr = start; addr < end; addr += TARGET_PAGE_SIZE) {
        p = page_find_alloc(addr >> TARGET_PAGE_BITS);
        /* if a write protected page containing code becomes writable
           again, we must invalidate the code inside */
        if (!(p->flags & PAGE_WRITE) &&
            (flags & PAGE_WRITE) &&
            p->first_tb) {
            tb_invalidate_phys_page(addr, 0, NULL);
        }
        p->flags = flags;
    }
    spin_unlock(&tb_lock);
}
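/* Illustrative sketch (not part of this file): how the user-mode emulation
   layer is expected to use the page flag API above when it emulates a guest
   mmap().  The protection translation shown here is hypothetical and
   simplified. */
#if 0 /* example only */
static void example_register_guest_mapping(unsigned long guest_start,
                                           unsigned long guest_len,
                                           int can_read, int can_write,
                                           int can_exec)
{
    int flags = PAGE_VALID;

    if (can_read)
        flags |= PAGE_READ;
    if (can_write)
        flags |= PAGE_WRITE;   /* PAGE_WRITE_ORG is added by page_set_flags() */
    if (can_exec)
        flags |= PAGE_EXEC;
    /* records the flags and invalidates any translated code in pages that
       become writable again */
    page_set_flags(guest_start, guest_start + guest_len, flags);
}
#endif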
/* called from signal handler: invalidate the code and unprotect the
   page. Return TRUE if the fault was successfully handled. */
int page_unprotect(unsigned long address, unsigned long pc, void *puc)
{
    unsigned int page_index, prot, pindex;
    PageDesc *p, *p1;
    unsigned long host_start, host_end, addr;

    host_start = address & qemu_host_page_mask;
    page_index = host_start >> TARGET_PAGE_BITS;
    p1 = page_find(page_index);
    if (!p1)
        return 0;
    host_end = host_start + qemu_host_page_size;
    p = p1;
    prot = 0;
    for(addr = host_start; addr < host_end; addr += TARGET_PAGE_SIZE) {
        prot |= p->flags;
        p++;
    }
    /* if the page was really writable, then we change its
       protection back to writable */
    if (prot & PAGE_WRITE_ORG) {
        pindex = (address - host_start) >> TARGET_PAGE_BITS;
        if (!(p1[pindex].flags & PAGE_WRITE)) {
            mprotect((void *)host_start, qemu_host_page_size,
                     (prot & PAGE_BITS) | PAGE_WRITE);
            p1[pindex].flags |= PAGE_WRITE;
            /* and since the content will be modified, we must invalidate
               the corresponding translated code. */
            tb_invalidate_phys_page(address, pc, puc);
#ifdef DEBUG_TB_CHECK
            tb_invalidate_check(address);
#endif
            return 1;
        }
    }
    return 0;
}

/* call this function when system calls directly modify a memory area */
void page_unprotect_range(uint8_t *data, unsigned long data_size)
{
    unsigned long start, end, addr;

    start = (unsigned long)data;
    end = start + data_size;
    start &= TARGET_PAGE_MASK;
    end = TARGET_PAGE_ALIGN(end);
    for(addr = start; addr < end; addr += TARGET_PAGE_SIZE) {
        page_unprotect(addr, 0, NULL);
    }
}

static inline void tlb_set_dirty(unsigned long addr, target_ulong vaddr)
{
}
#endif /* defined(CONFIG_USER_ONLY) */
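/* Illustrative sketch (not part of this file): the two expected callers of
   page_unprotect() and page_unprotect_range().  In QEMU the first call is
   made from the per-host signal handling code once the faulting address and
   PC have been extracted from the signal context; the helpers below are
   hypothetical. */
#if 0 /* example only */
static int example_handle_host_write_fault(unsigned long fault_addr,
                                           unsigned long fault_pc, void *puc)
{
    /* returns non-zero if the fault was only due to the write protection
       used to detect self modifying code: the code was invalidated, the
       page was made writable again, and the faulting instruction can be
       restarted */
    return page_unprotect(fault_addr, fault_pc, puc);
}

static void example_before_syscall_writes(uint8_t *guest_buf, unsigned long size)
{
    /* a host syscall is about to write into guest memory behind the back of
       the translator: drop write protection and translated code first */
    page_unprotect_range(guest_buf, size);
}
#endif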
/* register physical memory. 'size' must be a multiple of the target
   page size. If (phys_offset & ~TARGET_PAGE_MASK) != 0, then it is an
   io memory page */
void cpu_register_physical_memory(target_phys_addr_t start_addr,
                                  unsigned long size,
                                  unsigned long phys_offset)
{
    unsigned long addr, end_addr;
    PhysPageDesc *p;

    size = (size + TARGET_PAGE_SIZE - 1) & TARGET_PAGE_MASK;
    end_addr = start_addr + size;
    for(addr = start_addr; addr != end_addr; addr += TARGET_PAGE_SIZE) {
        p = phys_page_find_alloc(addr >> TARGET_PAGE_BITS);
        p->phys_offset = phys_offset;
        if ((phys_offset & ~TARGET_PAGE_MASK) <= IO_MEM_ROM)
            phys_offset += TARGET_PAGE_SIZE;
    }
}

static uint32_t unassigned_mem_readb(void *opaque, target_phys_addr_t addr)
{
    return 0;
}

static void unassigned_mem_writeb(void *opaque, target_phys_addr_t addr, uint32_t val)
{
}

static CPUReadMemoryFunc *unassigned_mem_read[3] = {
    unassigned_mem_readb,
    unassigned_mem_readb,
    unassigned_mem_readb,
};

static CPUWriteMemoryFunc *unassigned_mem_write[3] = {
    unassigned_mem_writeb,
    unassigned_mem_writeb,
    unassigned_mem_writeb,
};

/* self modifying code support in soft mmu mode: writing to a page
   containing code comes to these functions */

static void code_mem_writeb(void *opaque, target_phys_addr_t addr, uint32_t val)
{
    unsigned long phys_addr;

    phys_addr = addr - (unsigned long)phys_ram_base;
#if !defined(CONFIG_USER_ONLY)
    tb_invalidate_phys_page_fast(phys_addr, 1);
#endif
    stb_raw((uint8_t *)addr, val);
    phys_ram_dirty[phys_addr >> TARGET_PAGE_BITS] = 1;
}

static void code_mem_writew(void *opaque, target_phys_addr_t addr, uint32_t val)
{
    unsigned long phys_addr;

    phys_addr = addr - (unsigned long)phys_ram_base;
#if !defined(CONFIG_USER_ONLY)
    tb_invalidate_phys_page_fast(phys_addr, 2);
#endif
    stw_raw((uint8_t *)addr, val);
    phys_ram_dirty[phys_addr >> TARGET_PAGE_BITS] = 1;
}

static void code_mem_writel(void *opaque, target_phys_addr_t addr, uint32_t val)
{
    unsigned long phys_addr;

    phys_addr = addr - (unsigned long)phys_ram_base;
#if !defined(CONFIG_USER_ONLY)
    tb_invalidate_phys_page_fast(phys_addr, 4);
#endif
    stl_raw((uint8_t *)addr, val);
    phys_ram_dirty[phys_addr >> TARGET_PAGE_BITS] = 1;
}

static CPUReadMemoryFunc *code_mem_read[3] = {
    NULL, /* never used */
    NULL, /* never used */
    NULL, /* never used */
};

static CPUWriteMemoryFunc *code_mem_write[3] = {
    code_mem_writeb,
    code_mem_writew,
    code_mem_writel,
};

static void notdirty_mem_writeb(void *opaque, target_phys_addr_t addr, uint32_t val)
{
    stb_raw((uint8_t *)addr, val);
    tlb_set_dirty(addr, cpu_single_env->mem_write_vaddr);
}

static void notdirty_mem_writew(void *opaque, target_phys_addr_t addr, uint32_t val)
{
    stw_raw((uint8_t *)addr, val);
    tlb_set_dirty(addr, cpu_single_env->mem_write_vaddr);
}

static void notdirty_mem_writel(void *opaque, target_phys_addr_t addr, uint32_t val)
{
    stl_raw((uint8_t *)addr, val);
    tlb_set_dirty(addr, cpu_single_env->mem_write_vaddr);
}

static CPUWriteMemoryFunc *notdirty_mem_write[3] = {
    notdirty_mem_writeb,
    notdirty_mem_writew,
    notdirty_mem_writel,
};

static void io_mem_init(void)
{
    cpu_register_io_memory(IO_MEM_ROM >> IO_MEM_SHIFT, code_mem_read, unassigned_mem_write, NULL);
    cpu_register_io_memory(IO_MEM_UNASSIGNED >> IO_MEM_SHIFT, unassigned_mem_read, unassigned_mem_write, NULL);
    cpu_register_io_memory(IO_MEM_CODE >> IO_MEM_SHIFT, code_mem_read, code_mem_write, NULL);
    cpu_register_io_memory(IO_MEM_NOTDIRTY >> IO_MEM_SHIFT, code_mem_read, notdirty_mem_write, NULL);
    io_mem_nb = 5;

    /* alloc dirty bits array */
    phys_ram_dirty = qemu_malloc(phys_ram_size >> TARGET_PAGE_BITS);
}

/* mem_read and mem_write are arrays of functions containing the
   function to access byte (index 0), word (index 1) and dword (index
   2). All functions must be supplied. If io_index is non zero, the
   corresponding io zone is modified. If it is zero, a new io zone is
   allocated. The return value can be used with
   cpu_register_physical_memory(). (-1) is returned on error. */
int cpu_register_io_memory(int io_index,
                           CPUReadMemoryFunc **mem_read,
                           CPUWriteMemoryFunc **mem_write,
                           void *opaque)
{
    int i;

    if (io_index <= 0) {
        /* dynamic allocation: check that the io_mem tables are not full */
        if (io_mem_nb >= IO_MEM_NB_ENTRIES)
            return -1;
        io_index = io_mem_nb++;
    } else {
        if (io_index >= IO_MEM_NB_ENTRIES)
            return -1;
    }

    for(i = 0; i < 3; i++) {
        io_mem_read[io_index][i] = mem_read[i];
        io_mem_write[io_index][i] = mem_write[i];
    }
    io_mem_opaque[io_index] = opaque;
    return io_index << IO_MEM_SHIFT;
}

CPUWriteMemoryFunc **cpu_get_io_memory_write(int io_index)
{
    return io_mem_write[io_index >> IO_MEM_SHIFT];
}

CPUReadMemoryFunc **cpu_get_io_memory_read(int io_index)
{
    return io_mem_read[io_index >> IO_MEM_SHIFT];
}
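/* Illustrative sketch (not part of this file): how a device model is expected
   to combine cpu_register_io_memory() and cpu_register_physical_memory().
   The device, its callbacks and the physical address used are hypothetical. */
#if 0 /* example only */
static uint32_t example_dev_readb(void *opaque, target_phys_addr_t addr) { return 0; }
static uint32_t example_dev_readw(void *opaque, target_phys_addr_t addr) { return 0; }
static uint32_t example_dev_readl(void *opaque, target_phys_addr_t addr) { return 0; }
static void example_dev_writeb(void *opaque, target_phys_addr_t addr, uint32_t val) { }
static void example_dev_writew(void *opaque, target_phys_addr_t addr, uint32_t val) { }
static void example_dev_writel(void *opaque, target_phys_addr_t addr, uint32_t val) { }

static CPUReadMemoryFunc *example_dev_read[3] = {
    example_dev_readb, example_dev_readw, example_dev_readl,
};
static CPUWriteMemoryFunc *example_dev_write[3] = {
    example_dev_writeb, example_dev_writew, example_dev_writel,
};

static void example_dev_init(void *dev_state)
{
    int io_index;

    /* allocate a new io zone; the return value already has IO_MEM_SHIFT
       applied, so it can be used directly as a phys_offset */
    io_index = cpu_register_io_memory(0, example_dev_read, example_dev_write,
                                      dev_state);
    /* map one page of device registers at a hypothetical physical address */
    cpu_register_physical_memory(0xfe000000, TARGET_PAGE_SIZE, io_index);
    /* plain RAM is registered the same way, with the offset of the RAM
       inside phys_ram_base passed as phys_offset */
}
#endif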
/* physical memory access (slow version, mainly for debug) */
#if defined(CONFIG_USER_ONLY)
void cpu_physical_memory_rw(target_phys_addr_t addr, uint8_t *buf,
                            int len, int is_write)
{
    int l, flags;
    target_ulong page;

    while (len > 0) {
        page = addr & TARGET_PAGE_MASK;
        l = (page + TARGET_PAGE_SIZE) - addr;
        if (l > len)
            l = len;
        flags = page_get_flags(page);
        if (!(flags & PAGE_VALID))
            return;
        if (is_write) {
            if (!(flags & PAGE_WRITE))
                return;
            memcpy((uint8_t *)addr, buf, l);
        } else {
            if (!(flags & PAGE_READ))
                return;
            memcpy(buf, (uint8_t *)addr, l);
        }
        len -= l;
        buf += l;
        addr += l;
    }
}
#else
void cpu_physical_memory_rw(target_phys_addr_t addr, uint8_t *buf,
                            int len, int is_write)
{
    int l, io_index;
    uint8_t *ptr;
    uint32_t val;
    target_phys_addr_t page;
    unsigned long pd;
    PhysPageDesc *p;

    while (len > 0) {
        page = addr & TARGET_PAGE_MASK;
        l = (page + TARGET_PAGE_SIZE) - addr;
        if (l > len)
            l = len;
        p = phys_page_find(page >> TARGET_PAGE_BITS);
        if (!p) {
            pd = IO_MEM_UNASSIGNED;
        } else {
            pd = p->phys_offset;
        }

        if (is_write) {
            if ((pd & ~TARGET_PAGE_MASK) != 0) {
                io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
                if (l >= 4 && ((addr & 3) == 0)) {
                    /* 32 bit write access */
                    val = ldl_raw(buf);
                    io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
                    l = 4;
                } else if (l >= 2 && ((addr & 1) == 0)) {
                    /* 16 bit write access */
                    val = lduw_raw(buf);
                    io_mem_write[io_index][1](io_mem_opaque[io_index], addr, val);
                    l = 2;
                } else {
                    /* 8 bit write access */
                    val = ldub_raw(buf);
                    io_mem_write[io_index][0](io_mem_opaque[io_index], addr, val);
                    l = 1;
                }
            } else {
                unsigned long addr1;
                addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
                /* RAM case */
                ptr = phys_ram_base + addr1;
                memcpy(ptr, buf, l);
                /* invalidate code */
                tb_invalidate_phys_page_range(addr1, addr1 + l, 0);
                /* set dirty bit */
                phys_ram_dirty[addr1 >> TARGET_PAGE_BITS] = 1;
            }
        } else {
            if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
                (pd & ~TARGET_PAGE_MASK) != IO_MEM_CODE) {
                /* I/O case */
                io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
                if (l >= 4 && ((addr & 3) == 0)) {
                    /* 32 bit read access */
                    val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr);
                    stl_raw(buf, val);
                    l = 4;
                } else if (l >= 2 && ((addr & 1) == 0)) {
                    /* 16 bit read access */
                    val = io_mem_read[io_index][1](io_mem_opaque[io_index], addr);
                    stw_raw(buf, val);
                    l = 2;
                } else {
                    /* 8 bit read access */
                    val = io_mem_read[io_index][0](io_mem_opaque[io_index], addr);
                    stb_raw(buf, val);
                    l = 1;
                }
            } else {
                /* RAM case */
                ptr = phys_ram_base + (pd & TARGET_PAGE_MASK) +
                      (addr & ~TARGET_PAGE_MASK);
                memcpy(buf, ptr, l);
            }
        }
        len -= l;
        buf += l;
        addr += l;
    }
}
#endif
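/* Illustrative sketch (not part of this file): cpu_physical_memory_rw() is the
   generic way to copy data in and out of guest physical memory, e.g. to load
   a firmware or kernel image before the CPU starts.  The loader below is
   hypothetical. */
#if 0 /* example only */
static int example_load_image(target_phys_addr_t dest, const uint8_t *image,
                              int image_size)
{
    /* is_write != 0: copy from 'image' into guest physical memory; writes to
       RAM also invalidate any translated code in the destination range */
    cpu_physical_memory_rw(dest, (uint8_t *)image, image_size, 1);
    return image_size;
}
#endif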
/* virtual memory access for debug */
int cpu_memory_rw_debug(CPUState *env, target_ulong addr,
                        uint8_t *buf, int len, int is_write)
{
    int l;
    target_ulong page, phys_addr;

    while (len > 0) {
        page = addr & TARGET_PAGE_MASK;
        phys_addr = cpu_get_phys_page_debug(env, page);
        /* if no physical page mapped, return an error */
        if (phys_addr == -1)
            return -1;
        l = (page + TARGET_PAGE_SIZE) - addr;
        if (l > len)
            l = len;
        cpu_physical_memory_rw(phys_addr + (addr & ~TARGET_PAGE_MASK),
                               buf, l, is_write);
        len -= l;
        buf += l;
        addr += l;
    }
    return 0;
}

#if !defined(CONFIG_USER_ONLY)

#define MMUSUFFIX _cmmu
#define GETPC() NULL
#define env cpu_single_env
#define SOFTMMU_CODE_ACCESS

#define SHIFT 0
#include "softmmu_template.h"

#define SHIFT 1
#include "softmmu_template.h"

#define SHIFT 2
#include "softmmu_template.h"

#define SHIFT 3
#include "softmmu_template.h"

#undef env

#endif
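/* Illustrative sketch (not part of this file): cpu_memory_rw_debug() is meant
   for debugger-style access through the guest virtual address space (it is
   what a gdb stub would use to read guest memory).  The helper below is
   hypothetical. */
#if 0 /* example only */
static int example_debugger_read(CPUState *env, target_ulong vaddr,
                                 uint8_t *out, int len)
{
    /* returns 0 on success, -1 if some page in the range is unmapped */
    return cpu_memory_rw_debug(env, vaddr, out, len, 0);
}
#endif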