xref: /qemu/system/physmem.c (revision 4c0960c0c483fffc5f8e1dab169d946ac295bf44)
154936004Sbellard /*
2fd6ce8f6Sbellard  *  virtual page mapping and translated block handling
354936004Sbellard  *
454936004Sbellard  *  Copyright (c) 2003 Fabrice Bellard
554936004Sbellard  *
654936004Sbellard  * This library is free software; you can redistribute it and/or
754936004Sbellard  * modify it under the terms of the GNU Lesser General Public
854936004Sbellard  * License as published by the Free Software Foundation; either
954936004Sbellard  * version 2 of the License, or (at your option) any later version.
1054936004Sbellard  *
1154936004Sbellard  * This library is distributed in the hope that it will be useful,
1254936004Sbellard  * but WITHOUT ANY WARRANTY; without even the implied warranty of
1354936004Sbellard  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
1454936004Sbellard  * Lesser General Public License for more details.
1554936004Sbellard  *
1654936004Sbellard  * You should have received a copy of the GNU Lesser General Public
178167ee88SBlue Swirl  * License along with this library; if not, see <http://www.gnu.org/licenses/>.
1854936004Sbellard  */
1967b915a5Sbellard #include "config.h"
20d5a8f07cSbellard #ifdef _WIN32
21d5a8f07cSbellard #include <windows.h>
22d5a8f07cSbellard #else
23a98d49b1Sbellard #include <sys/types.h>
24d5a8f07cSbellard #include <sys/mman.h>
25d5a8f07cSbellard #endif
2654936004Sbellard #include <stdlib.h>
2754936004Sbellard #include <stdio.h>
2854936004Sbellard #include <stdarg.h>
2954936004Sbellard #include <string.h>
3054936004Sbellard #include <errno.h>
3154936004Sbellard #include <unistd.h>
3254936004Sbellard #include <inttypes.h>
3354936004Sbellard 
346180a181Sbellard #include "cpu.h"
356180a181Sbellard #include "exec-all.h"
36ca10f867Saurel32 #include "qemu-common.h"
37b67d9a52Sbellard #include "tcg.h"
38b3c7724cSpbrook #include "hw/hw.h"
3974576198Saliguori #include "osdep.h"
407ba1e619Saliguori #include "kvm.h"
4153a5960aSpbrook #if defined(CONFIG_USER_ONLY)
4253a5960aSpbrook #include <qemu.h>
4353a5960aSpbrook #endif
4454936004Sbellard 
45fd6ce8f6Sbellard //#define DEBUG_TB_INVALIDATE
4666e85a21Sbellard //#define DEBUG_FLUSH
479fa3e853Sbellard //#define DEBUG_TLB
4867d3b957Spbrook //#define DEBUG_UNASSIGNED
49fd6ce8f6Sbellard 
50fd6ce8f6Sbellard /* make various TB consistency checks */
51fd6ce8f6Sbellard //#define DEBUG_TB_CHECK
5298857888Sbellard //#define DEBUG_TLB_CHECK
53fd6ce8f6Sbellard 
541196be37Sths //#define DEBUG_IOPORT
55db7b5426Sblueswir1 //#define DEBUG_SUBPAGE
561196be37Sths 
5799773bd4Spbrook #if !defined(CONFIG_USER_ONLY)
5899773bd4Spbrook /* TB consistency checks only implemented for usermode emulation.  */
5999773bd4Spbrook #undef DEBUG_TB_CHECK
6099773bd4Spbrook #endif
6199773bd4Spbrook 
629fa3e853Sbellard #define SMC_BITMAP_USE_THRESHOLD 10
639fa3e853Sbellard 
64108c49b8Sbellard #if defined(TARGET_SPARC64)
65108c49b8Sbellard #define TARGET_PHYS_ADDR_SPACE_BITS 41
665dcb6b91Sblueswir1 #elif defined(TARGET_SPARC)
675dcb6b91Sblueswir1 #define TARGET_PHYS_ADDR_SPACE_BITS 36
68bedb69eaSj_mayer #elif defined(TARGET_ALPHA)
69bedb69eaSj_mayer #define TARGET_PHYS_ADDR_SPACE_BITS 42
70bedb69eaSj_mayer #define TARGET_VIRT_ADDR_SPACE_BITS 42
71108c49b8Sbellard #elif defined(TARGET_PPC64)
72108c49b8Sbellard #define TARGET_PHYS_ADDR_SPACE_BITS 42
734a1418e0SAnthony Liguori #elif defined(TARGET_X86_64)
7400f82b8aSaurel32 #define TARGET_PHYS_ADDR_SPACE_BITS 42
754a1418e0SAnthony Liguori #elif defined(TARGET_I386)
7600f82b8aSaurel32 #define TARGET_PHYS_ADDR_SPACE_BITS 36
77108c49b8Sbellard #else
78108c49b8Sbellard #define TARGET_PHYS_ADDR_SPACE_BITS 32
79108c49b8Sbellard #endif
80108c49b8Sbellard 
81bdaf78e0Sblueswir1 static TranslationBlock *tbs;
8226a5f13bSbellard int code_gen_max_blocks;
839fa3e853Sbellard TranslationBlock *tb_phys_hash[CODE_GEN_PHYS_HASH_SIZE];
84bdaf78e0Sblueswir1 static int nb_tbs;
85eb51d102Sbellard /* any access to the tbs or the page table must use this lock */
86eb51d102Sbellard spinlock_t tb_lock = SPIN_LOCK_UNLOCKED;
87fd6ce8f6Sbellard 
88141ac468Sblueswir1 #if defined(__arm__) || defined(__sparc_v9__)
89141ac468Sblueswir1 /* The prologue must be reachable with a direct jump. ARM and Sparc64
90141ac468Sblueswir1  have limited branch ranges (possibly also PPC) so place it in a
91d03d860bSblueswir1  section close to code segment. */
92d03d860bSblueswir1 #define code_gen_section                                \
93d03d860bSblueswir1     __attribute__((__section__(".gen_code")))           \
94d03d860bSblueswir1     __attribute__((aligned (32)))
95f8e2af11SStefan Weil #elif defined(_WIN32)
96f8e2af11SStefan Weil /* Maximum alignment for Win32 is 16. */
97f8e2af11SStefan Weil #define code_gen_section                                \
98f8e2af11SStefan Weil     __attribute__((aligned (16)))
99d03d860bSblueswir1 #else
100d03d860bSblueswir1 #define code_gen_section                                \
101d03d860bSblueswir1     __attribute__((aligned (32)))
102d03d860bSblueswir1 #endif
103d03d860bSblueswir1 
104d03d860bSblueswir1 uint8_t code_gen_prologue[1024] code_gen_section;
105bdaf78e0Sblueswir1 static uint8_t *code_gen_buffer;
106bdaf78e0Sblueswir1 static unsigned long code_gen_buffer_size;
10726a5f13bSbellard /* threshold to flush the translated code buffer */
108bdaf78e0Sblueswir1 static unsigned long code_gen_buffer_max_size;
109fd6ce8f6Sbellard uint8_t *code_gen_ptr;
110fd6ce8f6Sbellard 
111e2eef170Spbrook #if !defined(CONFIG_USER_ONLY)
1129fa3e853Sbellard int phys_ram_fd;
1131ccde1cbSbellard uint8_t *phys_ram_dirty;
11474576198Saliguori static int in_migration;
11594a6b54fSpbrook 
/* One contiguous chunk of guest RAM registered with the allocator. */
typedef struct RAMBlock {
    uint8_t *host;          /* host virtual address backing this block */
    ram_addr_t offset;      /* start offset within the guest ram address space */
    ram_addr_t length;      /* size of the block in bytes */
    struct RAMBlock *next;  /* singly-linked list of all RAM blocks */
} RAMBlock;
12294a6b54fSpbrook 
12394a6b54fSpbrook static RAMBlock *ram_blocks;
12494a6b54fSpbrook /* TODO: When we implement (and use) ram deallocation (e.g. for hotplug)
125ccbb4d44SStuart Brady    then we can no longer assume contiguous ram offsets, and external uses
12694a6b54fSpbrook    of this variable will break.  */
12794a6b54fSpbrook ram_addr_t last_ram_offset;
128e2eef170Spbrook #endif
1299fa3e853Sbellard 
1306a00d601Sbellard CPUState *first_cpu;
1316a00d601Sbellard /* current CPU in the current thread. It is only valid inside
1326a00d601Sbellard    cpu_exec() */
1336a00d601Sbellard CPUState *cpu_single_env;
1342e70f6efSpbrook /* 0 = Do not count executed instructions.
135bf20dc07Sths    1 = Precise instruction counting.
1362e70f6efSpbrook    2 = Adaptive rate instruction counting.  */
1372e70f6efSpbrook int use_icount = 0;
1382e70f6efSpbrook /* Current instruction counter.  While executing translated code this may
1392e70f6efSpbrook    include some instructions that have not yet been executed.  */
1402e70f6efSpbrook int64_t qemu_icount;
1416a00d601Sbellard 
/* Per-target-page bookkeeping used by the translator. */
typedef struct PageDesc {
    /* list of TBs intersecting this ram page */
    TranslationBlock *first_tb;
    /* in order to optimize self modifying code, we count the number
       of lookups we do to a given page to use a bitmap */
    unsigned int code_write_count;
    uint8_t *code_bitmap;   /* lazily-built bitmap of code bytes (SMC detection) */
#if defined(CONFIG_USER_ONLY)
    unsigned long flags;    /* PAGE_* protection flags (user-mode emulation only) */
#endif
} PageDesc;
15354936004Sbellard 
/* Descriptor for one guest physical page. */
typedef struct PhysPageDesc {
    /* offset in host memory of the page + io_index in the low bits */
    ram_addr_t phys_offset;
    /* offset of this page within the memory region that registered it */
    ram_addr_t region_offset;
} PhysPageDesc;
15992e873b9Sbellard 
16054936004Sbellard #define L2_BITS 10
161bedb69eaSj_mayer #if defined(CONFIG_USER_ONLY) && defined(TARGET_VIRT_ADDR_SPACE_BITS)
162bedb69eaSj_mayer /* XXX: this is a temporary hack for alpha target.
163bedb69eaSj_mayer  *      In the future, this is to be replaced by a multi-level table
164bedb69eaSj_mayer  *      to actually be able to handle the complete 64 bits address space.
165bedb69eaSj_mayer  */
166bedb69eaSj_mayer #define L1_BITS (TARGET_VIRT_ADDR_SPACE_BITS - L2_BITS - TARGET_PAGE_BITS)
167bedb69eaSj_mayer #else
16803875444Saurel32 #define L1_BITS (32 - L2_BITS - TARGET_PAGE_BITS)
169bedb69eaSj_mayer #endif
17054936004Sbellard 
17154936004Sbellard #define L1_SIZE (1 << L1_BITS)
17254936004Sbellard #define L2_SIZE (1 << L2_BITS)
17354936004Sbellard 
17483fb7adfSbellard unsigned long qemu_real_host_page_size;
17583fb7adfSbellard unsigned long qemu_host_page_bits;
17683fb7adfSbellard unsigned long qemu_host_page_size;
17783fb7adfSbellard unsigned long qemu_host_page_mask;
17854936004Sbellard 
17992e873b9Sbellard /* XXX: for system emulation, it could just be an array */
18054936004Sbellard static PageDesc *l1_map[L1_SIZE];
181bdaf78e0Sblueswir1 static PhysPageDesc **l1_phys_map;
18254936004Sbellard 
183e2eef170Spbrook #if !defined(CONFIG_USER_ONLY)
184e2eef170Spbrook static void io_mem_init(void);
185e2eef170Spbrook 
18633417e70Sbellard /* io memory support */
18733417e70Sbellard CPUWriteMemoryFunc *io_mem_write[IO_MEM_NB_ENTRIES][4];
18833417e70Sbellard CPUReadMemoryFunc *io_mem_read[IO_MEM_NB_ENTRIES][4];
189a4193c8aSbellard void *io_mem_opaque[IO_MEM_NB_ENTRIES];
190511d2b14Sblueswir1 static char io_mem_used[IO_MEM_NB_ENTRIES];
1916658ffb8Spbrook static int io_mem_watch;
1926658ffb8Spbrook #endif
19333417e70Sbellard 
19434865134Sbellard /* log support */
195d9b630fdSblueswir1 static const char *logfilename = "/tmp/qemu.log";
19634865134Sbellard FILE *logfile;
19734865134Sbellard int loglevel;
198e735b91cSpbrook static int log_append = 0;
19934865134Sbellard 
200e3db7226Sbellard /* statistics */
201e3db7226Sbellard static int tlb_flush_count;
202e3db7226Sbellard static int tb_flush_count;
203e3db7226Sbellard static int tb_phys_invalidate_count;
204e3db7226Sbellard 
205db7b5426Sblueswir1 #define SUBPAGE_IDX(addr) ((addr) & ~TARGET_PAGE_MASK)
/* State for a page carved into regions smaller than TARGET_PAGE_SIZE,
   each with its own I/O callbacks.  Arrays are indexed by the offset
   within the page (SUBPAGE_IDX) and by access size — presumably
   [1/2/4-byte handlers]; confirm against the subpage register code. */
typedef struct subpage_t {
    target_phys_addr_t base;    /* guest-physical base address of the page */
    CPUReadMemoryFunc * const *mem_read[TARGET_PAGE_SIZE][4];
    CPUWriteMemoryFunc * const *mem_write[TARGET_PAGE_SIZE][4];
    void *opaque[TARGET_PAGE_SIZE][2][4];    /* callback args: [offset][r=0/w=1][size] */
    ram_addr_t region_offset[TARGET_PAGE_SIZE][2][4];
} subpage_t;
213db7b5426Sblueswir1 
#ifdef _WIN32
/* Make [addr, addr+size) executable (and writable) on Win32 via a
   page-protection change. */
static void map_exec(void *addr, long size)
{
    DWORD old_protect;
    /* NOTE(review): the VirtualProtect return value is ignored; a
       failure would silently leave the buffer non-executable. */
    VirtualProtect(addr, size,
                   PAGE_EXECUTE_READWRITE, &old_protect);

}
#else
/* Make [addr, addr+size) executable via mprotect.  The range is
   widened to whole host pages, as mprotect requires page granularity. */
static void map_exec(void *addr, long size)
{
    unsigned long start, end, page_size;

    page_size = getpagesize();
    start = (unsigned long)addr;
    start &= ~(page_size - 1);      /* round start down to a page boundary */

    end = (unsigned long)addr + size;
    end += page_size - 1;           /* round end up to a page boundary */
    end &= ~(page_size - 1);

    /* NOTE(review): mprotect errors are ignored here as well. */
    mprotect((void *)start, end - start,
             PROT_READ | PROT_WRITE | PROT_EXEC);
}
#endif
2397cb69caeSbellard 
240b346ff46Sbellard static void page_init(void)
24154936004Sbellard {
24283fb7adfSbellard     /* NOTE: we can always suppose that qemu_host_page_size >=
24354936004Sbellard        TARGET_PAGE_SIZE */
244c2b48b69Saliguori #ifdef _WIN32
245c2b48b69Saliguori     {
246c2b48b69Saliguori         SYSTEM_INFO system_info;
247c2b48b69Saliguori 
248c2b48b69Saliguori         GetSystemInfo(&system_info);
249c2b48b69Saliguori         qemu_real_host_page_size = system_info.dwPageSize;
250c2b48b69Saliguori     }
251c2b48b69Saliguori #else
252c2b48b69Saliguori     qemu_real_host_page_size = getpagesize();
253c2b48b69Saliguori #endif
25483fb7adfSbellard     if (qemu_host_page_size == 0)
25583fb7adfSbellard         qemu_host_page_size = qemu_real_host_page_size;
25683fb7adfSbellard     if (qemu_host_page_size < TARGET_PAGE_SIZE)
25783fb7adfSbellard         qemu_host_page_size = TARGET_PAGE_SIZE;
25883fb7adfSbellard     qemu_host_page_bits = 0;
25983fb7adfSbellard     while ((1 << qemu_host_page_bits) < qemu_host_page_size)
26083fb7adfSbellard         qemu_host_page_bits++;
26183fb7adfSbellard     qemu_host_page_mask = ~(qemu_host_page_size - 1);
262108c49b8Sbellard     l1_phys_map = qemu_vmalloc(L1_SIZE * sizeof(void *));
263108c49b8Sbellard     memset(l1_phys_map, 0, L1_SIZE * sizeof(void *));
26450a9569bSbalrog 
26550a9569bSbalrog #if !defined(_WIN32) && defined(CONFIG_USER_ONLY)
26650a9569bSbalrog     {
26750a9569bSbalrog         long long startaddr, endaddr;
26850a9569bSbalrog         FILE *f;
26950a9569bSbalrog         int n;
27050a9569bSbalrog 
271c8a706feSpbrook         mmap_lock();
2720776590dSpbrook         last_brk = (unsigned long)sbrk(0);
27350a9569bSbalrog         f = fopen("/proc/self/maps", "r");
27450a9569bSbalrog         if (f) {
27550a9569bSbalrog             do {
27650a9569bSbalrog                 n = fscanf (f, "%llx-%llx %*[^\n]\n", &startaddr, &endaddr);
27750a9569bSbalrog                 if (n == 2) {
278e0b8d65aSblueswir1                     startaddr = MIN(startaddr,
279e0b8d65aSblueswir1                                     (1ULL << TARGET_PHYS_ADDR_SPACE_BITS) - 1);
280e0b8d65aSblueswir1                     endaddr = MIN(endaddr,
281e0b8d65aSblueswir1                                     (1ULL << TARGET_PHYS_ADDR_SPACE_BITS) - 1);
282b5fc909eSpbrook                     page_set_flags(startaddr & TARGET_PAGE_MASK,
28350a9569bSbalrog                                    TARGET_PAGE_ALIGN(endaddr),
28450a9569bSbalrog                                    PAGE_RESERVED);
28550a9569bSbalrog                 }
28650a9569bSbalrog             } while (!feof(f));
28750a9569bSbalrog             fclose(f);
28850a9569bSbalrog         }
289c8a706feSpbrook         mmap_unlock();
29050a9569bSbalrog     }
29150a9569bSbalrog #endif
29254936004Sbellard }
29354936004Sbellard 
294434929bfSaliguori static inline PageDesc **page_l1_map(target_ulong index)
29554936004Sbellard {
29617e2377aSpbrook #if TARGET_LONG_BITS > 32
29717e2377aSpbrook     /* Host memory outside guest VM.  For 32-bit targets we have already
29817e2377aSpbrook        excluded high addresses.  */
299d8173e0fSths     if (index > ((target_ulong)L2_SIZE * L1_SIZE))
30017e2377aSpbrook         return NULL;
30117e2377aSpbrook #endif
302434929bfSaliguori     return &l1_map[index >> L2_BITS];
303434929bfSaliguori }
304434929bfSaliguori 
305434929bfSaliguori static inline PageDesc *page_find_alloc(target_ulong index)
306434929bfSaliguori {
307434929bfSaliguori     PageDesc **lp, *p;
308434929bfSaliguori     lp = page_l1_map(index);
309434929bfSaliguori     if (!lp)
310434929bfSaliguori         return NULL;
311434929bfSaliguori 
31254936004Sbellard     p = *lp;
31354936004Sbellard     if (!p) {
31454936004Sbellard         /* allocate if not found */
31517e2377aSpbrook #if defined(CONFIG_USER_ONLY)
31617e2377aSpbrook         size_t len = sizeof(PageDesc) * L2_SIZE;
31717e2377aSpbrook         /* Don't use qemu_malloc because it may recurse.  */
318660f11beSBlue Swirl         p = mmap(NULL, len, PROT_READ | PROT_WRITE,
31917e2377aSpbrook                  MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
32054936004Sbellard         *lp = p;
321fb1c2cd7Saurel32         if (h2g_valid(p)) {
322fb1c2cd7Saurel32             unsigned long addr = h2g(p);
32317e2377aSpbrook             page_set_flags(addr & TARGET_PAGE_MASK,
32417e2377aSpbrook                            TARGET_PAGE_ALIGN(addr + len),
32517e2377aSpbrook                            PAGE_RESERVED);
32617e2377aSpbrook         }
32717e2377aSpbrook #else
32817e2377aSpbrook         p = qemu_mallocz(sizeof(PageDesc) * L2_SIZE);
32917e2377aSpbrook         *lp = p;
33017e2377aSpbrook #endif
33154936004Sbellard     }
33254936004Sbellard     return p + (index & (L2_SIZE - 1));
33354936004Sbellard }
33454936004Sbellard 
33500f82b8aSaurel32 static inline PageDesc *page_find(target_ulong index)
33654936004Sbellard {
337434929bfSaliguori     PageDesc **lp, *p;
338434929bfSaliguori     lp = page_l1_map(index);
339434929bfSaliguori     if (!lp)
340434929bfSaliguori         return NULL;
34154936004Sbellard 
342434929bfSaliguori     p = *lp;
343660f11beSBlue Swirl     if (!p) {
344660f11beSBlue Swirl         return NULL;
345660f11beSBlue Swirl     }
346fd6ce8f6Sbellard     return p + (index & (L2_SIZE - 1));
34754936004Sbellard }
34854936004Sbellard 
/* Return the PhysPageDesc for physical page 'index'.  When 'alloc' is
 * non-zero, missing intermediate tables are created (entries pre-filled
 * as IO_MEM_UNASSIGNED with identity region offsets); otherwise NULL is
 * returned for pages that were never registered.
 */
static PhysPageDesc *phys_page_find_alloc(target_phys_addr_t index, int alloc)
{
    void **lp, **p;
    PhysPageDesc *pd;

    p = (void **)l1_phys_map;
#if TARGET_PHYS_ADDR_SPACE_BITS > 32
    /* physical address space wider than 32 bits: walk one extra level
       of indirection above the (L1, L2) tables */
#if TARGET_PHYS_ADDR_SPACE_BITS > (32 + L1_BITS)
#error unsupported TARGET_PHYS_ADDR_SPACE_BITS
#endif
    lp = p + ((index >> (L1_BITS + L2_BITS)) & (L1_SIZE - 1));
    p = *lp;
    if (!p) {
        /* allocate if not found */
        if (!alloc)
            return NULL;
        p = qemu_vmalloc(sizeof(void *) * L1_SIZE);
        memset(p, 0, sizeof(void *) * L1_SIZE);
        *lp = p;
    }
#endif
    lp = p + ((index >> L2_BITS) & (L1_SIZE - 1));
    pd = *lp;
    if (!pd) {
        int i;
        /* allocate if not found */
        if (!alloc)
            return NULL;
        pd = qemu_vmalloc(sizeof(PhysPageDesc) * L2_SIZE);
        *lp = pd;
        /* fresh pages start out unassigned, region offset = own address */
        for (i = 0; i < L2_SIZE; i++) {
          pd[i].phys_offset = IO_MEM_UNASSIGNED;
          pd[i].region_offset = (index + i) << TARGET_PAGE_BITS;
        }
    }
    return ((PhysPageDesc *)pd) + (index & (L2_SIZE - 1));
}
38792e873b9Sbellard 
388108c49b8Sbellard static inline PhysPageDesc *phys_page_find(target_phys_addr_t index)
38992e873b9Sbellard {
390108c49b8Sbellard     return phys_page_find_alloc(index, 0);
39192e873b9Sbellard }
39292e873b9Sbellard 
3939fa3e853Sbellard #if !defined(CONFIG_USER_ONLY)
3946a00d601Sbellard static void tlb_protect_code(ram_addr_t ram_addr);
3953a7d929eSbellard static void tlb_unprotect_code_phys(CPUState *env, ram_addr_t ram_addr,
3963a7d929eSbellard                                     target_ulong vaddr);
397c8a706feSpbrook #define mmap_lock() do { } while(0)
398c8a706feSpbrook #define mmap_unlock() do { } while(0)
3999fa3e853Sbellard #endif
400fd6ce8f6Sbellard 
4014369415fSbellard #define DEFAULT_CODE_GEN_BUFFER_SIZE (32 * 1024 * 1024)
4024369415fSbellard 
4034369415fSbellard #if defined(CONFIG_USER_ONLY)
404ccbb4d44SStuart Brady /* Currently it is not recommended to allocate big chunks of data in
4054369415fSbellard    user mode. It will change when a dedicated libc will be used */
4064369415fSbellard #define USE_STATIC_CODE_GEN_BUFFER
4074369415fSbellard #endif
4084369415fSbellard 
4094369415fSbellard #ifdef USE_STATIC_CODE_GEN_BUFFER
4104369415fSbellard static uint8_t static_code_gen_buffer[DEFAULT_CODE_GEN_BUFFER_SIZE];
4114369415fSbellard #endif
4124369415fSbellard 
/* Allocate the buffer that receives generated host code, honouring the
 * per-host placement constraints, then size the TB descriptor array.
 * 'tb_size' is the requested buffer size in bytes; 0 selects a default.
 * Aborts the process if the buffer cannot be mapped.
 */
static void code_gen_alloc(unsigned long tb_size)
{
#ifdef USE_STATIC_CODE_GEN_BUFFER
    /* user mode: use the static BSS buffer, just make it executable */
    code_gen_buffer = static_code_gen_buffer;
    code_gen_buffer_size = DEFAULT_CODE_GEN_BUFFER_SIZE;
    map_exec(code_gen_buffer, code_gen_buffer_size);
#else
    code_gen_buffer_size = tb_size;
    if (code_gen_buffer_size == 0) {
#if defined(CONFIG_USER_ONLY)
        /* in user mode, phys_ram_size is not meaningful */
        code_gen_buffer_size = DEFAULT_CODE_GEN_BUFFER_SIZE;
#else
        /* XXX: needs adjustments */
        code_gen_buffer_size = (unsigned long)(ram_size / 4);
#endif
    }
    if (code_gen_buffer_size < MIN_CODE_GEN_BUFFER_SIZE)
        code_gen_buffer_size = MIN_CODE_GEN_BUFFER_SIZE;
    /* The code gen buffer location may have constraints depending on
       the host cpu and OS */
#if defined(__linux__)
    {
        int flags;
        void *start = NULL;

        flags = MAP_PRIVATE | MAP_ANONYMOUS;
#if defined(__x86_64__)
        /* keep the buffer in the low 4GB so 32-bit displacements work */
        flags |= MAP_32BIT;
        /* Cannot map more than that */
        if (code_gen_buffer_size > (800 * 1024 * 1024))
            code_gen_buffer_size = (800 * 1024 * 1024);
#elif defined(__sparc_v9__)
        // Map the buffer below 2G, so we can use direct calls and branches
        flags |= MAP_FIXED;
        start = (void *) 0x60000000UL;
        if (code_gen_buffer_size > (512 * 1024 * 1024))
            code_gen_buffer_size = (512 * 1024 * 1024);
#elif defined(__arm__)
        /* Map the buffer below 32M, so we can use direct calls and branches */
        flags |= MAP_FIXED;
        start = (void *) 0x01000000UL;
        if (code_gen_buffer_size > 16 * 1024 * 1024)
            code_gen_buffer_size = 16 * 1024 * 1024;
#endif
        code_gen_buffer = mmap(start, code_gen_buffer_size,
                               PROT_WRITE | PROT_READ | PROT_EXEC,
                               flags, -1, 0);
        if (code_gen_buffer == MAP_FAILED) {
            fprintf(stderr, "Could not allocate dynamic translator buffer\n");
            exit(1);
        }
    }
#elif defined(__FreeBSD__) || defined(__DragonFly__)
    {
        int flags;
        void *addr = NULL;
        flags = MAP_PRIVATE | MAP_ANONYMOUS;
#if defined(__x86_64__)
        /* FreeBSD doesn't have MAP_32BIT, use MAP_FIXED and assume
         * 0x40000000 is free */
        flags |= MAP_FIXED;
        addr = (void *)0x40000000;
        /* Cannot map more than that */
        if (code_gen_buffer_size > (800 * 1024 * 1024))
            code_gen_buffer_size = (800 * 1024 * 1024);
#endif
        code_gen_buffer = mmap(addr, code_gen_buffer_size,
                               PROT_WRITE | PROT_READ | PROT_EXEC,
                               flags, -1, 0);
        if (code_gen_buffer == MAP_FAILED) {
            fprintf(stderr, "Could not allocate dynamic translator buffer\n");
            exit(1);
        }
    }
#else
    /* generic fallback: heap-allocate and flip the pages executable */
    code_gen_buffer = qemu_malloc(code_gen_buffer_size);
    map_exec(code_gen_buffer, code_gen_buffer_size);
#endif
#endif /* !USE_STATIC_CODE_GEN_BUFFER */
    map_exec(code_gen_prologue, sizeof(code_gen_prologue));
    /* leave room for the largest possible TB at the end of the buffer */
    code_gen_buffer_max_size = code_gen_buffer_size -
        code_gen_max_block_size();
    code_gen_max_blocks = code_gen_buffer_size / CODE_GEN_AVG_BLOCK_SIZE;
    tbs = qemu_malloc(code_gen_max_blocks * sizeof(TranslationBlock));
}
49926a5f13bSbellard 
/* Must be called before using the QEMU cpus. 'tb_size' is the size
   (in bytes) allocated to the translation buffer. Zero means default
   size.  Initializes the code generator, the page tables and (system
   mode only) the I/O memory handlers, in that order. */
void cpu_exec_init_all(unsigned long tb_size)
{
    cpu_gen_init();
    code_gen_alloc(tb_size);
    code_gen_ptr = code_gen_buffer;   /* generation starts at the buffer base */
    page_init();
#if !defined(CONFIG_USER_ONLY)
    io_mem_init();
#endif
}
51326a5f13bSbellard 
5149656f324Spbrook #if defined(CPU_SAVE_VERSION) && !defined(CONFIG_USER_ONLY)
5159656f324Spbrook 
5169656f324Spbrook #define CPU_COMMON_SAVE_VERSION 1
5179656f324Spbrook 
/* Savevm handler for state shared by all CPU models: the halted flag
   and the pending interrupt mask.  Field order is the wire format and
   must stay in sync with cpu_common_load. */
static void cpu_common_save(QEMUFile *f, void *opaque)
{
    CPUState *env = opaque;

    /* NOTE(review): presumably refreshes env from the accelerator
       (e.g. KVM) before its fields are read — confirm against
       cpu_synchronize_state. */
    cpu_synchronize_state(env);

    qemu_put_be32s(f, &env->halted);
    qemu_put_be32s(f, &env->interrupt_request);
}
5279656f324Spbrook 
/* Loadvm counterpart of cpu_common_save; reads fields in the same
   order.  Returns 0 on success, -EINVAL on a version mismatch. */
static int cpu_common_load(QEMUFile *f, void *opaque, int version_id)
{
    CPUState *env = opaque;

    cpu_synchronize_state(env);
    if (version_id != CPU_COMMON_SAVE_VERSION)
        return -EINVAL;

    qemu_get_be32s(f, &env->halted);
    qemu_get_be32s(f, &env->interrupt_request);
    /* 0x01 was CPU_INTERRUPT_EXIT. This line can be removed when the
       version_id is increased. */
    env->interrupt_request &= ~0x01;
    /* stale TLB contents must not survive a state load */
    tlb_flush(env, 1);

    return 0;
}
5459656f324Spbrook #endif
5469656f324Spbrook 
547950f1472SGlauber Costa CPUState *qemu_get_cpu(int cpu)
548950f1472SGlauber Costa {
549950f1472SGlauber Costa     CPUState *env = first_cpu;
550950f1472SGlauber Costa 
551950f1472SGlauber Costa     while (env) {
552950f1472SGlauber Costa         if (env->cpu_index == cpu)
553950f1472SGlauber Costa             break;
554950f1472SGlauber Costa         env = env->next_cpu;
555950f1472SGlauber Costa     }
556950f1472SGlauber Costa 
557950f1472SGlauber Costa     return env;
558950f1472SGlauber Costa }
559950f1472SGlauber Costa 
/* Append 'env' to the global CPU list, assign it the next free
   cpu_index and NUMA node 0, initialize its breakpoint/watchpoint
   lists, and (system mode) register its savevm handlers. */
void cpu_exec_init(CPUState *env)
{
    CPUState **penv;
    int cpu_index;

#if defined(CONFIG_USER_ONLY)
    cpu_list_lock();
#endif
    env->next_cpu = NULL;
    penv = &first_cpu;
    cpu_index = 0;
    /* walk to the tail of the list; the index equals the list position */
    while (*penv != NULL) {
        penv = &(*penv)->next_cpu;
        cpu_index++;
    }
    env->cpu_index = cpu_index;
    env->numa_node = 0;
    TAILQ_INIT(&env->breakpoints);
    TAILQ_INIT(&env->watchpoints);
    *penv = env;
#if defined(CONFIG_USER_ONLY)
    cpu_list_unlock();
#endif
#if defined(CPU_SAVE_VERSION) && !defined(CONFIG_USER_ONLY)
    /* common state first, then the per-model CPU state */
    register_savevm("cpu_common", cpu_index, CPU_COMMON_SAVE_VERSION,
                    cpu_common_save, cpu_common_load, env);
    register_savevm("cpu", cpu_index, CPU_SAVE_VERSION,
                    cpu_save, cpu_load, env);
#endif
}
590fd6ce8f6Sbellard 
5919fa3e853Sbellard static inline void invalidate_page_bitmap(PageDesc *p)
5929fa3e853Sbellard {
5939fa3e853Sbellard     if (p->code_bitmap) {
59459817ccbSbellard         qemu_free(p->code_bitmap);
5959fa3e853Sbellard         p->code_bitmap = NULL;
5969fa3e853Sbellard     }
5979fa3e853Sbellard     p->code_write_count = 0;
5989fa3e853Sbellard }
5999fa3e853Sbellard 
600fd6ce8f6Sbellard /* set to NULL all the 'first_tb' fields in all PageDescs */
601fd6ce8f6Sbellard static void page_flush_tb(void)
602fd6ce8f6Sbellard {
603fd6ce8f6Sbellard     int i, j;
604fd6ce8f6Sbellard     PageDesc *p;
605fd6ce8f6Sbellard 
606fd6ce8f6Sbellard     for(i = 0; i < L1_SIZE; i++) {
607fd6ce8f6Sbellard         p = l1_map[i];
608fd6ce8f6Sbellard         if (p) {
6099fa3e853Sbellard             for(j = 0; j < L2_SIZE; j++) {
6109fa3e853Sbellard                 p->first_tb = NULL;
6119fa3e853Sbellard                 invalidate_page_bitmap(p);
6129fa3e853Sbellard                 p++;
6139fa3e853Sbellard             }
614fd6ce8f6Sbellard         }
615fd6ce8f6Sbellard     }
616fd6ce8f6Sbellard }
617fd6ce8f6Sbellard 
/* flush all the translation blocks */
/* XXX: tb_flush is currently not thread safe */
void tb_flush(CPUState *env1)
{
    CPUState *env;
#if defined(DEBUG_FLUSH)
    printf("qemu: flush code_size=%ld nb_tbs=%d avg_tb_size=%ld\n",
           (unsigned long)(code_gen_ptr - code_gen_buffer),
           nb_tbs, nb_tbs > 0 ?
           ((unsigned long)(code_gen_ptr - code_gen_buffer)) / nb_tbs : 0);
#endif
    /* generated code must never have run past the end of the buffer */
    if ((unsigned long)(code_gen_ptr - code_gen_buffer) > code_gen_buffer_size)
        cpu_abort(env1, "Internal error: code buffer overflow\n");

    nb_tbs = 0;

    /* invalidate every CPU's direct TB lookup cache */
    for(env = first_cpu; env != NULL; env = env->next_cpu) {
        memset (env->tb_jmp_cache, 0, TB_JMP_CACHE_SIZE * sizeof (void *));
    }

    /* empty the physical hash table and the per-page TB lists */
    memset (tb_phys_hash, 0, CODE_GEN_PHYS_HASH_SIZE * sizeof (void *));
    page_flush_tb();

    /* code generation restarts from the beginning of the buffer */
    code_gen_ptr = code_gen_buffer;
    /* XXX: flush processor icache at this point if cache flush is
       expensive */
    tb_flush_count++;
}
646fd6ce8f6Sbellard 
647fd6ce8f6Sbellard #ifdef DEBUG_TB_CHECK
648fd6ce8f6Sbellard 
649bc98a7efSj_mayer static void tb_invalidate_check(target_ulong address)
650fd6ce8f6Sbellard {
651fd6ce8f6Sbellard     TranslationBlock *tb;
652fd6ce8f6Sbellard     int i;
653fd6ce8f6Sbellard     address &= TARGET_PAGE_MASK;
65499773bd4Spbrook     for(i = 0;i < CODE_GEN_PHYS_HASH_SIZE; i++) {
65599773bd4Spbrook         for(tb = tb_phys_hash[i]; tb != NULL; tb = tb->phys_hash_next) {
656fd6ce8f6Sbellard             if (!(address + TARGET_PAGE_SIZE <= tb->pc ||
657fd6ce8f6Sbellard                   address >= tb->pc + tb->size)) {
6580bf9e31aSBlue Swirl                 printf("ERROR invalidate: address=" TARGET_FMT_lx
6590bf9e31aSBlue Swirl                        " PC=%08lx size=%04x\n",
66099773bd4Spbrook                        address, (long)tb->pc, tb->size);
661fd6ce8f6Sbellard             }
662fd6ce8f6Sbellard         }
663fd6ce8f6Sbellard     }
664fd6ce8f6Sbellard }
665fd6ce8f6Sbellard 
666fd6ce8f6Sbellard /* verify that all the pages have correct rights for code */
667fd6ce8f6Sbellard static void tb_page_check(void)
668fd6ce8f6Sbellard {
669fd6ce8f6Sbellard     TranslationBlock *tb;
670fd6ce8f6Sbellard     int i, flags1, flags2;
671fd6ce8f6Sbellard 
67299773bd4Spbrook     for(i = 0;i < CODE_GEN_PHYS_HASH_SIZE; i++) {
67399773bd4Spbrook         for(tb = tb_phys_hash[i]; tb != NULL; tb = tb->phys_hash_next) {
674fd6ce8f6Sbellard             flags1 = page_get_flags(tb->pc);
675fd6ce8f6Sbellard             flags2 = page_get_flags(tb->pc + tb->size - 1);
676fd6ce8f6Sbellard             if ((flags1 & PAGE_WRITE) || (flags2 & PAGE_WRITE)) {
677fd6ce8f6Sbellard                 printf("ERROR page flags: PC=%08lx size=%04x f1=%x f2=%x\n",
67899773bd4Spbrook                        (long)tb->pc, tb->size, flags1, flags2);
679fd6ce8f6Sbellard             }
680fd6ce8f6Sbellard         }
681fd6ce8f6Sbellard     }
682fd6ce8f6Sbellard }
683fd6ce8f6Sbellard 
684fd6ce8f6Sbellard #endif
685fd6ce8f6Sbellard 
686fd6ce8f6Sbellard /* invalidate one TB */
687fd6ce8f6Sbellard static inline void tb_remove(TranslationBlock **ptb, TranslationBlock *tb,
688fd6ce8f6Sbellard                              int next_offset)
689fd6ce8f6Sbellard {
690fd6ce8f6Sbellard     TranslationBlock *tb1;
691fd6ce8f6Sbellard     for(;;) {
692fd6ce8f6Sbellard         tb1 = *ptb;
693fd6ce8f6Sbellard         if (tb1 == tb) {
694fd6ce8f6Sbellard             *ptb = *(TranslationBlock **)((char *)tb1 + next_offset);
695fd6ce8f6Sbellard             break;
696fd6ce8f6Sbellard         }
697fd6ce8f6Sbellard         ptb = (TranslationBlock **)((char *)tb1 + next_offset);
698fd6ce8f6Sbellard     }
699fd6ce8f6Sbellard }
700fd6ce8f6Sbellard 
7019fa3e853Sbellard static inline void tb_page_remove(TranslationBlock **ptb, TranslationBlock *tb)
7029fa3e853Sbellard {
7039fa3e853Sbellard     TranslationBlock *tb1;
7049fa3e853Sbellard     unsigned int n1;
7059fa3e853Sbellard 
7069fa3e853Sbellard     for(;;) {
7079fa3e853Sbellard         tb1 = *ptb;
7089fa3e853Sbellard         n1 = (long)tb1 & 3;
7099fa3e853Sbellard         tb1 = (TranslationBlock *)((long)tb1 & ~3);
7109fa3e853Sbellard         if (tb1 == tb) {
7119fa3e853Sbellard             *ptb = tb1->page_next[n1];
7129fa3e853Sbellard             break;
7139fa3e853Sbellard         }
7149fa3e853Sbellard         ptb = &tb1->page_next[n1];
7159fa3e853Sbellard     }
7169fa3e853Sbellard }
7179fa3e853Sbellard 
/* Remove the entry for jump slot 'n' of 'tb' from the circular list
   of TBs that chain to the same target.  The list is threaded through
   tagged pointers: the low 2 bits select which jmp_next[] slot of the
   pointed-to TB continues the chain, and tag value 2 marks the list
   head (the target's jmp_first). */
static inline void tb_jmp_remove(TranslationBlock *tb, int n)
{
    TranslationBlock *tb1, **ptb;
    unsigned int n1;

    ptb = &tb->jmp_next[n];
    tb1 = *ptb;
    if (tb1) {
        /* find tb(n) in circular list */
        for(;;) {
            tb1 = *ptb;
            n1 = (long)tb1 & 3;
            tb1 = (TranslationBlock *)((long)tb1 & ~3);
            if (n1 == n && tb1 == tb)
                break;
            if (n1 == 2) {
                /* reached the list head: continue via the target's
                   jmp_first pointer */
                ptb = &tb1->jmp_first;
            } else {
                ptb = &tb1->jmp_next[n1];
            }
        }
        /* now we can suppress tb(n) from the list */
        *ptb = tb->jmp_next[n];

        tb->jmp_next[n] = NULL;
    }
}
745d4e8164fSbellard 
746d4e8164fSbellard /* reset the jump entry 'n' of a TB so that it is not chained to
747d4e8164fSbellard    another TB */
748d4e8164fSbellard static inline void tb_reset_jump(TranslationBlock *tb, int n)
749d4e8164fSbellard {
750d4e8164fSbellard     tb_set_jmp_target(tb, n, (unsigned long)(tb->tc_ptr + tb->tb_next_offset[n]));
751d4e8164fSbellard }
752d4e8164fSbellard 
/* Remove 'tb' from every lookup structure: the physical hash table,
   the per-page TB lists, each CPU's virtual-pc jump cache, and the
   direct-jump chains of other TBs that branch into it.  'page_addr'
   is the page currently being invalidated (-1 if none): list removal
   is skipped for that page because the caller is already walking and
   resetting its list. */
void tb_phys_invalidate(TranslationBlock *tb, target_ulong page_addr)
{
    CPUState *env;
    PageDesc *p;
    unsigned int h, n1;
    target_phys_addr_t phys_pc;
    TranslationBlock *tb1, *tb2;

    /* remove the TB from the hash list */
    phys_pc = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
    h = tb_phys_hash_func(phys_pc);
    tb_remove(&tb_phys_hash[h], tb,
              offsetof(TranslationBlock, phys_hash_next));

    /* remove the TB from the page list */
    if (tb->page_addr[0] != page_addr) {
        p = page_find(tb->page_addr[0] >> TARGET_PAGE_BITS);
        tb_page_remove(&p->first_tb, tb);
        invalidate_page_bitmap(p);
    }
    if (tb->page_addr[1] != -1 && tb->page_addr[1] != page_addr) {
        p = page_find(tb->page_addr[1] >> TARGET_PAGE_BITS);
        tb_page_remove(&p->first_tb, tb);
        invalidate_page_bitmap(p);
    }

    tb_invalidated_flag = 1;

    /* remove the TB from each CPU's virtual-pc jump cache */
    h = tb_jmp_cache_hash_func(tb->pc);
    for(env = first_cpu; env != NULL; env = env->next_cpu) {
        if (env->tb_jmp_cache[h] == tb)
            env->tb_jmp_cache[h] = NULL;
    }

    /* suppress this TB from the two jump lists */
    tb_jmp_remove(tb, 0);
    tb_jmp_remove(tb, 1);

    /* suppress any remaining jumps to this TB: walk the circular
       jmp_first list (terminated by a pointer tagged with 2) and
       reset each incoming direct jump back to its own epilogue. */
    tb1 = tb->jmp_first;
    for(;;) {
        n1 = (long)tb1 & 3;
        if (n1 == 2)
            break;
        tb1 = (TranslationBlock *)((long)tb1 & ~3);
        tb2 = tb1->jmp_next[n1];
        tb_reset_jump(tb1, n1);
        tb1->jmp_next[n1] = NULL;
        tb1 = tb2;
    }
    tb->jmp_first = (TranslationBlock *)((long)tb | 2); /* fail safe */

    tb_phys_invalidate_count++;
}
8089fa3e853Sbellard 
/* Set 'len' consecutive bits in the bitmap 'tab', starting at bit
   index 'start'.  Bit k lives in tab[k >> 3] at position (k & 7). */
static inline void set_bits(uint8_t *tab, int start, int len)
{
    int end = start + len;
    uint8_t *p = tab + (start >> 3);
    int mask = 0xff << (start & 7);

    if ((start & ~7) == (end & ~7)) {
        /* the whole run falls inside a single byte */
        if (start < end) {
            mask &= ~(0xff << (end & 7));
            *p |= mask;
        }
    } else {
        /* partial head byte */
        *p++ |= mask;
        start = (start + 8) & ~7;
        /* whole bytes in the middle */
        while (start < (end & ~7)) {
            *p++ = 0xff;
            start += 8;
        }
        /* partial tail byte, if any */
        if (start < end) {
            *p |= ~(0xff << (end & 7));
        }
    }
}
8359fa3e853Sbellard 
/* Build p->code_bitmap for the page: one bit per byte of the page,
   set where some TB's translated guest code lies.  The bitmap lets
   tb_invalidate_phys_page_fast decide cheaply whether a write
   actually touches translated code. */
static void build_page_bitmap(PageDesc *p)
{
    int n, tb_start, tb_end;
    TranslationBlock *tb;

    p->code_bitmap = qemu_mallocz(TARGET_PAGE_SIZE / 8);

    tb = p->first_tb;
    while (tb != NULL) {
        /* the low 2 bits of the list pointer tell which of the TB's
           two pages this list entry is for */
        n = (long)tb & 3;
        tb = (TranslationBlock *)((long)tb & ~3);
        /* NOTE: this is subtle as a TB may span two physical pages */
        if (n == 0) {
            /* first page: code starts at the TB's pc offset */
            /* NOTE: tb_end may be after the end of the page, but
               it is not a problem */
            tb_start = tb->pc & ~TARGET_PAGE_MASK;
            tb_end = tb_start + tb->size;
            if (tb_end > TARGET_PAGE_SIZE)
                tb_end = TARGET_PAGE_SIZE;
        } else {
            /* second page: code starts at page offset 0 */
            tb_start = 0;
            tb_end = ((tb->pc + tb->size) & ~TARGET_PAGE_MASK);
        }
        set_bits(p->code_bitmap, tb_start, tb_end - tb_start);
        tb = tb->page_next[n];
    }
}
8639fa3e853Sbellard 
/* Translate a new TB for (pc, cs_base, flags, cflags) and link it
   into the physical page tables.  If the TB pool or code buffer is
   exhausted, flush all TBs and retry; the second tb_alloc cannot
   fail.  Returns the freshly generated TB. */
TranslationBlock *tb_gen_code(CPUState *env,
                              target_ulong pc, target_ulong cs_base,
                              int flags, int cflags)
{
    TranslationBlock *tb;
    uint8_t *tc_ptr;
    target_ulong phys_pc, phys_page2, virt_page2;
    int code_gen_size;

    phys_pc = get_phys_addr_code(env, pc);
    tb = tb_alloc(pc);
    if (!tb) {
        /* flush must be done */
        tb_flush(env);
        /* cannot fail at this point */
        tb = tb_alloc(pc);
        /* Don't forget to invalidate previous TB info.  */
        tb_invalidated_flag = 1;
    }
    tc_ptr = code_gen_ptr;
    tb->tc_ptr = tc_ptr;
    tb->cs_base = cs_base;
    tb->flags = flags;
    tb->cflags = cflags;
    cpu_gen_code(env, tb, &code_gen_size);
    /* advance the generation pointer past the emitted host code,
       rounded up to CODE_GEN_ALIGN */
    code_gen_ptr = (void *)(((unsigned long)code_gen_ptr + code_gen_size + CODE_GEN_ALIGN - 1) & ~(CODE_GEN_ALIGN - 1));

    /* check next page if needed: a TB whose guest code crosses a page
       boundary must also be linked to its second physical page */
    virt_page2 = (pc + tb->size - 1) & TARGET_PAGE_MASK;
    phys_page2 = -1;
    if ((pc & TARGET_PAGE_MASK) != virt_page2) {
        phys_page2 = get_phys_addr_code(env, virt_page2);
    }
    tb_link_phys(tb, phys_pc, phys_page2);
    return tb;
}
900d720b93dSbellard 
9019fa3e853Sbellard /* invalidate all TBs which intersect with the target physical page
9029fa3e853Sbellard    starting in range [start;end[. NOTE: start and end must refer to
903d720b93dSbellard    the same physical page. 'is_cpu_write_access' should be true if called
904d720b93dSbellard    from a real cpu write access: the virtual CPU will exit the current
905d720b93dSbellard    TB if code is modified inside this TB. */
void tb_invalidate_phys_page_range(target_phys_addr_t start, target_phys_addr_t end,
                                   int is_cpu_write_access)
{
    TranslationBlock *tb, *tb_next, *saved_tb;
    CPUState *env = cpu_single_env;
    target_ulong tb_start, tb_end;
    PageDesc *p;
    int n;
#ifdef TARGET_HAS_PRECISE_SMC
    /* lazily locate the currently-executing TB only on a real CPU
       write, and only once */
    int current_tb_not_found = is_cpu_write_access;
    TranslationBlock *current_tb = NULL;
    int current_tb_modified = 0;
    target_ulong current_pc = 0;
    target_ulong current_cs_base = 0;
    int current_flags = 0;
#endif /* TARGET_HAS_PRECISE_SMC */

    p = page_find(start >> TARGET_PAGE_BITS);
    if (!p)
        return;
    /* pages written to repeatedly earn a code bitmap so later writes
       can be filtered cheaply (see tb_invalidate_phys_page_fast) */
    if (!p->code_bitmap &&
        ++p->code_write_count >= SMC_BITMAP_USE_THRESHOLD &&
        is_cpu_write_access) {
        /* build code bitmap */
        build_page_bitmap(p);
    }

    /* we remove all the TBs in the range [start, end[ */
    /* XXX: see if in some cases it could be faster to invalidate all the code */
    tb = p->first_tb;
    while (tb != NULL) {
        /* low 2 bits of the list pointer select which of the TB's two
           pages this list entry belongs to */
        n = (long)tb & 3;
        tb = (TranslationBlock *)((long)tb & ~3);
        tb_next = tb->page_next[n];
        /* NOTE: this is subtle as a TB may span two physical pages */
        if (n == 0) {
            /* NOTE: tb_end may be after the end of the page, but
               it is not a problem */
            tb_start = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
            tb_end = tb_start + tb->size;
        } else {
            tb_start = tb->page_addr[1];
            tb_end = tb_start + ((tb->pc + tb->size) & ~TARGET_PAGE_MASK);
        }
        if (!(tb_end <= start || tb_start >= end)) {
#ifdef TARGET_HAS_PRECISE_SMC
            if (current_tb_not_found) {
                current_tb_not_found = 0;
                current_tb = NULL;
                if (env->mem_io_pc) {
                    /* now we have a real cpu fault */
                    current_tb = tb_find_pc(env->mem_io_pc);
                }
            }
            if (current_tb == tb &&
                (current_tb->cflags & CF_COUNT_MASK) != 1) {
                /* If we are modifying the current TB, we must stop
                its execution. We could be more precise by checking
                that the modification is after the current PC, but it
                would require a specialized function to partially
                restore the CPU state */

                current_tb_modified = 1;
                cpu_restore_state(current_tb, env,
                                  env->mem_io_pc, NULL);
                cpu_get_tb_cpu_state(env, &current_pc, &current_cs_base,
                                     &current_flags);
            }
#endif /* TARGET_HAS_PRECISE_SMC */
            /* we need to do that to handle the case where a signal
               occurs while doing tb_phys_invalidate() */
            saved_tb = NULL;
            if (env) {
                saved_tb = env->current_tb;
                env->current_tb = NULL;
            }
            tb_phys_invalidate(tb, -1);
            if (env) {
                env->current_tb = saved_tb;
                /* re-deliver any interrupt that arrived while
                   current_tb was cleared */
                if (env->interrupt_request && env->current_tb)
                    cpu_interrupt(env, env->interrupt_request);
            }
        }
        tb = tb_next;
    }
#if !defined(CONFIG_USER_ONLY)
    /* if no code remaining, no need to continue to use slow writes */
    if (!p->first_tb) {
        invalidate_page_bitmap(p);
        if (is_cpu_write_access) {
            tlb_unprotect_code_phys(env, start, env->mem_io_vaddr);
        }
    }
#endif
#ifdef TARGET_HAS_PRECISE_SMC
    if (current_tb_modified) {
        /* we generate a block containing just the instruction
           modifying the memory. It will ensure that it cannot modify
           itself */
        env->current_tb = NULL;
        tb_gen_code(env, current_pc, current_cs_base, current_flags, 1);
        cpu_resume_from_signal(env, NULL);
    }
#endif
}
10119fa3e853Sbellard 
10129fa3e853Sbellard /* len must be <= 8 and start must be a multiple of len */
static inline void tb_invalidate_phys_page_fast(target_phys_addr_t start, int len)
{
    PageDesc *p;
    int offset, b;
#if 0
    if (1) {
        qemu_log("modifying code at 0x%x size=%d EIP=%x PC=%08x\n",
                  cpu_single_env->mem_io_vaddr, len,
                  cpu_single_env->eip,
                  cpu_single_env->eip + (long)cpu_single_env->segs[R_CS].base);
    }
#endif
    p = page_find(start >> TARGET_PAGE_BITS);
    if (!p)
        return;
    if (p->code_bitmap) {
        /* fast path: consult the page's code bitmap and do the full
           (slow) invalidate only if the written bytes overlap
           translated code */
        offset = start & ~TARGET_PAGE_MASK;
        b = p->code_bitmap[offset >> 3] >> (offset & 7);
        if (b & ((1 << len) - 1))
            goto do_invalidate;
    } else {
    do_invalidate:
        tb_invalidate_phys_page_range(start, start + len, 1);
    }
}
10389fa3e853Sbellard 
10399fa3e853Sbellard #if !defined(CONFIG_SOFTMMU)
/* Invalidate every TB on the page containing 'addr' (user-mode-only
   path, reached from the write-protection fault handler).  'pc' and
   'puc' describe the faulting write context for precise SMC handling;
   pc == 0 means no executing TB needs to be considered. */
static void tb_invalidate_phys_page(target_phys_addr_t addr,
                                    unsigned long pc, void *puc)
{
    TranslationBlock *tb;
    PageDesc *p;
    int n;
#ifdef TARGET_HAS_PRECISE_SMC
    TranslationBlock *current_tb = NULL;
    CPUState *env = cpu_single_env;
    int current_tb_modified = 0;
    target_ulong current_pc = 0;
    target_ulong current_cs_base = 0;
    int current_flags = 0;
#endif

    addr &= TARGET_PAGE_MASK;
    p = page_find(addr >> TARGET_PAGE_BITS);
    if (!p)
        return;
    tb = p->first_tb;
#ifdef TARGET_HAS_PRECISE_SMC
    if (tb && pc != 0) {
        current_tb = tb_find_pc(pc);
    }
#endif
    while (tb != NULL) {
        /* low 2 bits of the list pointer select which of the TB's two
           pages this list entry belongs to */
        n = (long)tb & 3;
        tb = (TranslationBlock *)((long)tb & ~3);
#ifdef TARGET_HAS_PRECISE_SMC
        if (current_tb == tb &&
            (current_tb->cflags & CF_COUNT_MASK) != 1) {
                /* If we are modifying the current TB, we must stop
                   its execution. We could be more precise by checking
                   that the modification is after the current PC, but it
                   would require a specialized function to partially
                   restore the CPU state */

            current_tb_modified = 1;
            cpu_restore_state(current_tb, env, pc, puc);
            cpu_get_tb_cpu_state(env, &current_pc, &current_cs_base,
                                 &current_flags);
        }
#endif /* TARGET_HAS_PRECISE_SMC */
        tb_phys_invalidate(tb, addr);
        tb = tb->page_next[n];
    }
    p->first_tb = NULL;
#ifdef TARGET_HAS_PRECISE_SMC
    if (current_tb_modified) {
        /* we generate a block containing just the instruction
           modifying the memory. It will ensure that it cannot modify
           itself */
        env->current_tb = NULL;
        tb_gen_code(env, current_pc, current_cs_base, current_flags, 1);
        cpu_resume_from_signal(env, puc);
    }
#endif
}
10989fa3e853Sbellard #endif
1099fd6ce8f6Sbellard 
1100fd6ce8f6Sbellard /* add the tb in the target page and protect it if necessary */
static inline void tb_alloc_page(TranslationBlock *tb,
                                 unsigned int n, target_ulong page_addr)
{
    PageDesc *p;
    TranslationBlock *last_first_tb;

    tb->page_addr[n] = page_addr;
    p = page_find_alloc(page_addr >> TARGET_PAGE_BITS);
    /* push onto the page's TB list; the low bits of the stored
       pointer record which of the TB's two pages (n) this entry is */
    tb->page_next[n] = p->first_tb;
    last_first_tb = p->first_tb;
    p->first_tb = (TranslationBlock *)((long)tb | n);
    invalidate_page_bitmap(p);

#if defined(TARGET_HAS_SMC) || 1

#if defined(CONFIG_USER_ONLY)
    if (p->flags & PAGE_WRITE) {
        target_ulong addr;
        PageDesc *p2;
        int prot;

        /* force the host page as non writable (writes will have a
           page fault + mprotect overhead) */
        page_addr &= qemu_host_page_mask;
        prot = 0;
        /* a host page may cover several target pages: accumulate the
           flags of all of them and write-protect the whole host page */
        for(addr = page_addr; addr < page_addr + qemu_host_page_size;
            addr += TARGET_PAGE_SIZE) {

            p2 = page_find (addr >> TARGET_PAGE_BITS);
            if (!p2)
                continue;
            prot |= p2->flags;
            p2->flags &= ~PAGE_WRITE;
            page_get_flags(addr);
          }
        mprotect(g2h(page_addr), qemu_host_page_size,
                 (prot & PAGE_BITS) & ~PAGE_WRITE);
#ifdef DEBUG_TB_INVALIDATE
        printf("protecting code page: 0x" TARGET_FMT_lx "\n",
               page_addr);
#endif
    }
#else
    /* if some code is already present, then the pages are already
       protected. So we handle the case where only the first TB is
       allocated in a physical page */
    if (!last_first_tb) {
        tlb_protect_code(page_addr);
    }
#endif

#endif /* TARGET_HAS_SMC */
}
1154fd6ce8f6Sbellard 
1155fd6ce8f6Sbellard /* Allocate a new translation block. Flush the translation buffer if
1156fd6ce8f6Sbellard    too many translation blocks or too much generated code. */
1157c27004ecSbellard TranslationBlock *tb_alloc(target_ulong pc)
1158fd6ce8f6Sbellard {
1159fd6ce8f6Sbellard     TranslationBlock *tb;
1160fd6ce8f6Sbellard 
116126a5f13bSbellard     if (nb_tbs >= code_gen_max_blocks ||
116226a5f13bSbellard         (code_gen_ptr - code_gen_buffer) >= code_gen_buffer_max_size)
1163d4e8164fSbellard         return NULL;
1164fd6ce8f6Sbellard     tb = &tbs[nb_tbs++];
1165fd6ce8f6Sbellard     tb->pc = pc;
1166b448f2f3Sbellard     tb->cflags = 0;
1167d4e8164fSbellard     return tb;
1168d4e8164fSbellard }
1169d4e8164fSbellard 
11702e70f6efSpbrook void tb_free(TranslationBlock *tb)
11712e70f6efSpbrook {
1172bf20dc07Sths     /* In practice this is mostly used for single use temporary TB
11732e70f6efSpbrook        Ignore the hard cases and just back up if this TB happens to
11742e70f6efSpbrook        be the last one generated.  */
11752e70f6efSpbrook     if (nb_tbs > 0 && tb == &tbs[nb_tbs - 1]) {
11762e70f6efSpbrook         code_gen_ptr = tb->tc_ptr;
11772e70f6efSpbrook         nb_tbs--;
11782e70f6efSpbrook     }
11792e70f6efSpbrook }
11802e70f6efSpbrook 
11819fa3e853Sbellard /* add a new TB and link it to the physical page tables. phys_page2 is
11829fa3e853Sbellard    (-1) to indicate that only one page contains the TB. */
void tb_link_phys(TranslationBlock *tb,
                  target_ulong phys_pc, target_ulong phys_page2)
{
    unsigned int h;
    TranslationBlock **ptb;

    /* Grab the mmap lock to stop another thread invalidating this TB
       before we are done.  */
    mmap_lock();
    /* add in the physical hash table */
    h = tb_phys_hash_func(phys_pc);
    ptb = &tb_phys_hash[h];
    tb->phys_hash_next = *ptb;
    *ptb = tb;

    /* add in the page list */
    tb_alloc_page(tb, 0, phys_pc & TARGET_PAGE_MASK);
    if (phys_page2 != -1)
        tb_alloc_page(tb, 1, phys_page2);
    else
        tb->page_addr[1] = -1;

    /* empty incoming-jump list: jmp_first points back at this TB with
       tag 2, and neither outgoing jump is chained yet */
    tb->jmp_first = (TranslationBlock *)((long)tb | 2);
    tb->jmp_next[0] = NULL;
    tb->jmp_next[1] = NULL;

    /* init original jump addresses (0xffff marks an unused slot) */
    if (tb->tb_next_offset[0] != 0xffff)
        tb_reset_jump(tb, 0);
    if (tb->tb_next_offset[1] != 0xffff)
        tb_reset_jump(tb, 1);

#ifdef DEBUG_TB_CHECK
    tb_page_check();
#endif
    mmap_unlock();
}
1220fd6ce8f6Sbellard 
1221a513fe19Sbellard /* find the TB 'tb' such that tb[0].tc_ptr <= tc_ptr <
1222a513fe19Sbellard    tb[1].tc_ptr. Return NULL if not found */
1223a513fe19Sbellard TranslationBlock *tb_find_pc(unsigned long tc_ptr)
1224a513fe19Sbellard {
1225a513fe19Sbellard     int m_min, m_max, m;
1226a513fe19Sbellard     unsigned long v;
1227a513fe19Sbellard     TranslationBlock *tb;
1228a513fe19Sbellard 
1229a513fe19Sbellard     if (nb_tbs <= 0)
1230a513fe19Sbellard         return NULL;
1231a513fe19Sbellard     if (tc_ptr < (unsigned long)code_gen_buffer ||
1232a513fe19Sbellard         tc_ptr >= (unsigned long)code_gen_ptr)
1233a513fe19Sbellard         return NULL;
1234a513fe19Sbellard     /* binary search (cf Knuth) */
1235a513fe19Sbellard     m_min = 0;
1236a513fe19Sbellard     m_max = nb_tbs - 1;
1237a513fe19Sbellard     while (m_min <= m_max) {
1238a513fe19Sbellard         m = (m_min + m_max) >> 1;
1239a513fe19Sbellard         tb = &tbs[m];
1240a513fe19Sbellard         v = (unsigned long)tb->tc_ptr;
1241a513fe19Sbellard         if (v == tc_ptr)
1242a513fe19Sbellard             return tb;
1243a513fe19Sbellard         else if (tc_ptr < v) {
1244a513fe19Sbellard             m_max = m - 1;
1245a513fe19Sbellard         } else {
1246a513fe19Sbellard             m_min = m + 1;
1247a513fe19Sbellard         }
1248a513fe19Sbellard     }
1249a513fe19Sbellard     return &tbs[m_max];
1250a513fe19Sbellard }
12517501267eSbellard 
1252ea041c0eSbellard static void tb_reset_jump_recursive(TranslationBlock *tb);
1253ea041c0eSbellard 
/* Undo the direct chaining of jump slot 'n' of 'tb': follow the
   tagged-pointer chain to the target TB (tag value 2 marks its
   jmp_first head), unlink 'tb' from that incoming-jump list, patch
   the generated code back to the epilogue, then recurse into the
   target to break its outgoing chains too. */
static inline void tb_reset_jump_recursive2(TranslationBlock *tb, int n)
{
    TranslationBlock *tb1, *tb_next, **ptb;
    unsigned int n1;

    tb1 = tb->jmp_next[n];
    if (tb1 != NULL) {
        /* find head of list */
        for(;;) {
            n1 = (long)tb1 & 3;
            tb1 = (TranslationBlock *)((long)tb1 & ~3);
            if (n1 == 2)
                break;
            tb1 = tb1->jmp_next[n1];
        }
        /* we are now sure now that tb jumps to tb1 */
        tb_next = tb1;

        /* remove tb from the jmp_first list */
        ptb = &tb_next->jmp_first;
        for(;;) {
            tb1 = *ptb;
            n1 = (long)tb1 & 3;
            tb1 = (TranslationBlock *)((long)tb1 & ~3);
            if (n1 == n && tb1 == tb)
                break;
            ptb = &tb1->jmp_next[n1];
        }
        *ptb = tb->jmp_next[n];
        tb->jmp_next[n] = NULL;

        /* suppress the jump to next tb in generated code */
        tb_reset_jump(tb, n);

        /* suppress jumps in the tb on which we could have jumped */
        tb_reset_jump_recursive(tb_next);
    }
}
1292ea041c0eSbellard 
1293ea041c0eSbellard static void tb_reset_jump_recursive(TranslationBlock *tb)
1294ea041c0eSbellard {
1295ea041c0eSbellard     tb_reset_jump_recursive2(tb, 0);
1296ea041c0eSbellard     tb_reset_jump_recursive2(tb, 1);
1297ea041c0eSbellard }
1298ea041c0eSbellard 
12991fddef4bSbellard #if defined(TARGET_HAS_ICE)
1300d720b93dSbellard static void breakpoint_invalidate(CPUState *env, target_ulong pc)
1301d720b93dSbellard {
13029b3c35e0Sj_mayer     target_phys_addr_t addr;
13039b3c35e0Sj_mayer     target_ulong pd;
1304c2f07f81Spbrook     ram_addr_t ram_addr;
1305c2f07f81Spbrook     PhysPageDesc *p;
1306d720b93dSbellard 
1307c2f07f81Spbrook     addr = cpu_get_phys_page_debug(env, pc);
1308c2f07f81Spbrook     p = phys_page_find(addr >> TARGET_PAGE_BITS);
1309c2f07f81Spbrook     if (!p) {
1310c2f07f81Spbrook         pd = IO_MEM_UNASSIGNED;
1311c2f07f81Spbrook     } else {
1312c2f07f81Spbrook         pd = p->phys_offset;
1313c2f07f81Spbrook     }
1314c2f07f81Spbrook     ram_addr = (pd & TARGET_PAGE_MASK) | (pc & ~TARGET_PAGE_MASK);
1315706cd4b5Spbrook     tb_invalidate_phys_page_range(ram_addr, ram_addr + 1, 0);
1316d720b93dSbellard }
1317c27004ecSbellard #endif
1318d720b93dSbellard 
13196658ffb8Spbrook /* Add a watchpoint.  */
1320a1d1bb31Saliguori int cpu_watchpoint_insert(CPUState *env, target_ulong addr, target_ulong len,
1321a1d1bb31Saliguori                           int flags, CPUWatchpoint **watchpoint)
13226658ffb8Spbrook {
1323b4051334Saliguori     target_ulong len_mask = ~(len - 1);
1324c0ce998eSaliguori     CPUWatchpoint *wp;
13256658ffb8Spbrook 
1326b4051334Saliguori     /* sanity checks: allow power-of-2 lengths, deny unaligned watchpoints */
1327b4051334Saliguori     if ((len != 1 && len != 2 && len != 4 && len != 8) || (addr & ~len_mask)) {
1328b4051334Saliguori         fprintf(stderr, "qemu: tried to set invalid watchpoint at "
1329b4051334Saliguori                 TARGET_FMT_lx ", len=" TARGET_FMT_lu "\n", addr, len);
1330b4051334Saliguori         return -EINVAL;
1331b4051334Saliguori     }
1332a1d1bb31Saliguori     wp = qemu_malloc(sizeof(*wp));
13336658ffb8Spbrook 
1334a1d1bb31Saliguori     wp->vaddr = addr;
1335b4051334Saliguori     wp->len_mask = len_mask;
1336a1d1bb31Saliguori     wp->flags = flags;
1337a1d1bb31Saliguori 
13382dc9f411Saliguori     /* keep all GDB-injected watchpoints in front */
1339c0ce998eSaliguori     if (flags & BP_GDB)
1340c0ce998eSaliguori         TAILQ_INSERT_HEAD(&env->watchpoints, wp, entry);
1341c0ce998eSaliguori     else
1342c0ce998eSaliguori         TAILQ_INSERT_TAIL(&env->watchpoints, wp, entry);
1343a1d1bb31Saliguori 
13446658ffb8Spbrook     tlb_flush_page(env, addr);
1345a1d1bb31Saliguori 
1346a1d1bb31Saliguori     if (watchpoint)
1347a1d1bb31Saliguori         *watchpoint = wp;
1348a1d1bb31Saliguori     return 0;
13496658ffb8Spbrook }
13506658ffb8Spbrook 
1351a1d1bb31Saliguori /* Remove a specific watchpoint.  */
1352a1d1bb31Saliguori int cpu_watchpoint_remove(CPUState *env, target_ulong addr, target_ulong len,
1353a1d1bb31Saliguori                           int flags)
13546658ffb8Spbrook {
1355b4051334Saliguori     target_ulong len_mask = ~(len - 1);
1356a1d1bb31Saliguori     CPUWatchpoint *wp;
13576658ffb8Spbrook 
1358c0ce998eSaliguori     TAILQ_FOREACH(wp, &env->watchpoints, entry) {
1359b4051334Saliguori         if (addr == wp->vaddr && len_mask == wp->len_mask
13606e140f28Saliguori                 && flags == (wp->flags & ~BP_WATCHPOINT_HIT)) {
1361a1d1bb31Saliguori             cpu_watchpoint_remove_by_ref(env, wp);
13626658ffb8Spbrook             return 0;
13636658ffb8Spbrook         }
13646658ffb8Spbrook     }
1365a1d1bb31Saliguori     return -ENOENT;
13666658ffb8Spbrook }
13676658ffb8Spbrook 
/* Remove a specific watchpoint by reference.  The entry is unlinked,
   the stale TLB entry for its page is dropped, and the memory freed. */
void cpu_watchpoint_remove_by_ref(CPUState *env, CPUWatchpoint *watchpoint)
{
    TAILQ_REMOVE(&env->watchpoints, watchpoint, entry);

    /* drop any cached TLB entry that still carries the watchpoint flag */
    tlb_flush_page(env, watchpoint->vaddr);

    /* free only after watchpoint->vaddr has been read above */
    qemu_free(watchpoint);
}
13777d03f82fSedgar_igl 
1378a1d1bb31Saliguori /* Remove all matching watchpoints.  */
1379a1d1bb31Saliguori void cpu_watchpoint_remove_all(CPUState *env, int mask)
1380a1d1bb31Saliguori {
1381c0ce998eSaliguori     CPUWatchpoint *wp, *next;
1382a1d1bb31Saliguori 
1383c0ce998eSaliguori     TAILQ_FOREACH_SAFE(wp, &env->watchpoints, entry, next) {
1384a1d1bb31Saliguori         if (wp->flags & mask)
1385a1d1bb31Saliguori             cpu_watchpoint_remove_by_ref(env, wp);
1386a1d1bb31Saliguori     }
1387c0ce998eSaliguori }
1388a1d1bb31Saliguori 
1389a1d1bb31Saliguori /* Add a breakpoint.  */
1390a1d1bb31Saliguori int cpu_breakpoint_insert(CPUState *env, target_ulong pc, int flags,
1391a1d1bb31Saliguori                           CPUBreakpoint **breakpoint)
13924c3a88a2Sbellard {
13931fddef4bSbellard #if defined(TARGET_HAS_ICE)
1394c0ce998eSaliguori     CPUBreakpoint *bp;
13954c3a88a2Sbellard 
1396a1d1bb31Saliguori     bp = qemu_malloc(sizeof(*bp));
13974c3a88a2Sbellard 
1398a1d1bb31Saliguori     bp->pc = pc;
1399a1d1bb31Saliguori     bp->flags = flags;
1400a1d1bb31Saliguori 
14012dc9f411Saliguori     /* keep all GDB-injected breakpoints in front */
1402c0ce998eSaliguori     if (flags & BP_GDB)
1403c0ce998eSaliguori         TAILQ_INSERT_HEAD(&env->breakpoints, bp, entry);
1404c0ce998eSaliguori     else
1405c0ce998eSaliguori         TAILQ_INSERT_TAIL(&env->breakpoints, bp, entry);
1406d720b93dSbellard 
1407d720b93dSbellard     breakpoint_invalidate(env, pc);
1408a1d1bb31Saliguori 
1409a1d1bb31Saliguori     if (breakpoint)
1410a1d1bb31Saliguori         *breakpoint = bp;
14114c3a88a2Sbellard     return 0;
14124c3a88a2Sbellard #else
1413a1d1bb31Saliguori     return -ENOSYS;
14144c3a88a2Sbellard #endif
14154c3a88a2Sbellard }
14164c3a88a2Sbellard 
1417a1d1bb31Saliguori /* Remove a specific breakpoint.  */
1418a1d1bb31Saliguori int cpu_breakpoint_remove(CPUState *env, target_ulong pc, int flags)
1419a1d1bb31Saliguori {
14207d03f82fSedgar_igl #if defined(TARGET_HAS_ICE)
1421a1d1bb31Saliguori     CPUBreakpoint *bp;
1422a1d1bb31Saliguori 
1423c0ce998eSaliguori     TAILQ_FOREACH(bp, &env->breakpoints, entry) {
1424a1d1bb31Saliguori         if (bp->pc == pc && bp->flags == flags) {
1425a1d1bb31Saliguori             cpu_breakpoint_remove_by_ref(env, bp);
1426a1d1bb31Saliguori             return 0;
14277d03f82fSedgar_igl         }
1428a1d1bb31Saliguori     }
1429a1d1bb31Saliguori     return -ENOENT;
1430a1d1bb31Saliguori #else
1431a1d1bb31Saliguori     return -ENOSYS;
14327d03f82fSedgar_igl #endif
14337d03f82fSedgar_igl }
14347d03f82fSedgar_igl 
/* Remove a specific breakpoint by reference: unlink it, retranslate the
   affected code so the breakpoint check disappears, and free the entry. */
void cpu_breakpoint_remove_by_ref(CPUState *env, CPUBreakpoint *breakpoint)
{
#if defined(TARGET_HAS_ICE)
    TAILQ_REMOVE(&env->breakpoints, breakpoint, entry);

    /* must invalidate before freeing: breakpoint->pc is read here */
    breakpoint_invalidate(env, breakpoint->pc);

    qemu_free(breakpoint);
#endif
}
1446a1d1bb31Saliguori 
1447a1d1bb31Saliguori /* Remove all matching breakpoints. */
1448a1d1bb31Saliguori void cpu_breakpoint_remove_all(CPUState *env, int mask)
1449a1d1bb31Saliguori {
1450a1d1bb31Saliguori #if defined(TARGET_HAS_ICE)
1451c0ce998eSaliguori     CPUBreakpoint *bp, *next;
1452a1d1bb31Saliguori 
1453c0ce998eSaliguori     TAILQ_FOREACH_SAFE(bp, &env->breakpoints, entry, next) {
1454a1d1bb31Saliguori         if (bp->flags & mask)
1455a1d1bb31Saliguori             cpu_breakpoint_remove_by_ref(env, bp);
1456c0ce998eSaliguori     }
14574c3a88a2Sbellard #endif
14584c3a88a2Sbellard }
14594c3a88a2Sbellard 
/* enable or disable single step mode. EXCP_DEBUG is returned by the
   CPU loop after each instruction */
void cpu_single_step(CPUState *env, int enabled)
{
#if defined(TARGET_HAS_ICE)
    if (env->singlestep_enabled != enabled) {
        env->singlestep_enabled = enabled;
        if (kvm_enabled())
            kvm_update_guest_debug(env, 0);
        else {
            /* under TCG, existing TBs were generated without the
               single-step exit, so they must all be discarded */
            /* must flush all the translated code to avoid inconsistencies */
            /* XXX: only flush what is necessary */
            tb_flush(env);
        }
    }
#endif
}
1477c33a346eSbellard 
/* enable or disable low levels log */
void cpu_set_log(int log_flags)
{
    loglevel = log_flags;
    /* lazily open the log file the first time any log flag is enabled */
    if (loglevel && !logfile) {
        logfile = fopen(logfilename, log_append ? "a" : "w");
        if (!logfile) {
            perror(logfilename);
            _exit(1);
        }
#if !defined(CONFIG_SOFTMMU)
        /* must avoid mmap() usage of glibc by setting a buffer "by hand" */
        {
            static char logfile_buf[4096];
            setvbuf(logfile, logfile_buf, _IOLBF, sizeof(logfile_buf));
        }
#elif !defined(_WIN32)
        /* Win32 doesn't support line-buffering and requires size >= 2 */
        setvbuf(logfile, NULL, _IOLBF, 0);
#endif
        /* any later re-open appends, so earlier output is not lost */
        log_append = 1;
    }
    /* disabling every log flag closes the file */
    if (!loglevel && logfile) {
        fclose(logfile);
        logfile = NULL;
    }
}
150534865134Sbellard 
150634865134Sbellard void cpu_set_log_filename(const char *filename)
150734865134Sbellard {
150834865134Sbellard     logfilename = strdup(filename);
1509e735b91cSpbrook     if (logfile) {
1510e735b91cSpbrook         fclose(logfile);
1511e735b91cSpbrook         logfile = NULL;
1512e735b91cSpbrook     }
1513e735b91cSpbrook     cpu_set_log(loglevel);
151434865134Sbellard }
1515c33a346eSbellard 
/* Unchain the TB the CPU is currently executing so the execution loop
   is left as soon as possible (e.g. to service a pending interrupt). */
static void cpu_unlink_tb(CPUState *env)
{
#if defined(CONFIG_USE_NPTL)
    /* FIXME: TB unchaining isn't SMP safe.  For now just ignore the
       problem and hope the cpu will stop of its own accord.  For userspace
       emulation this often isn't actually as bad as it sounds.  Often
       signals are used primarily to interrupt blocking syscalls.  */
#else
    TranslationBlock *tb;
    static spinlock_t interrupt_lock = SPIN_LOCK_UNLOCKED;

    tb = env->current_tb;
    /* if the cpu is currently executing code, we must unlink it and
       all the potentially executing TB */
    if (tb && !testandset(&interrupt_lock)) {
        /* if the lock was contended, someone else is already unlinking */
        env->current_tb = NULL;
        tb_reset_jump_recursive(tb);
        resetlock(&interrupt_lock);
    }
#endif
}
15373098dba0Saurel32 
/* mask must never be zero, except for A20 change call */
void cpu_interrupt(CPUState *env, int mask)
{
    int old_mask;

    old_mask = env->interrupt_request;
    env->interrupt_request |= mask;

#ifndef CONFIG_USER_ONLY
    /*
     * If called from iothread context, wake the target cpu in
     * case its halted.
     */
    if (!qemu_cpu_self(env)) {
        qemu_cpu_kick(env);
        return;
    }
#endif

    if (use_icount) {
        /* force the instruction counter to expire so the CPU loop
           stops at the end of the current instruction */
        env->icount_decr.u16.high = 0xffff;
#ifndef CONFIG_USER_ONLY
        /* a *new* interrupt raised outside an I/O instruction would
           make icount execution non-deterministic */
        if (!can_do_io(env)
            && (mask & ~old_mask) != 0) {
            cpu_abort(env, "Raised interrupt while not in I/O function");
        }
#endif
    } else {
        cpu_unlink_tb(env);
    }
}
1569ea041c0eSbellard 
/* Clear the given pending-interrupt bits. */
void cpu_reset_interrupt(CPUState *env, int mask)
{
    env->interrupt_request &= ~mask;
}
1574b54ad049Sbellard 
/* Request that the CPU leave its execution loop as soon as possible. */
void cpu_exit(CPUState *env)
{
    env->exit_request = 1;
    cpu_unlink_tb(env);
}
15803098dba0Saurel32 
/* Table mapping "-d" option names to log mask bits, terminated by a
   zero-mask sentinel entry (relied on by cpu_str_to_log_mask). */
const CPULogItem cpu_log_items[] = {
    { CPU_LOG_TB_OUT_ASM, "out_asm",
      "show generated host assembly code for each compiled TB" },
    { CPU_LOG_TB_IN_ASM, "in_asm",
      "show target assembly code for each compiled TB" },
    { CPU_LOG_TB_OP, "op",
      "show micro ops for each compiled TB" },
    { CPU_LOG_TB_OP_OPT, "op_opt",
      "show micro ops "
#ifdef TARGET_I386
      "before eflags optimization and "
#endif
      "after liveness analysis" },
    { CPU_LOG_INT, "int",
      "show interrupts/exceptions in short format" },
    { CPU_LOG_EXEC, "exec",
      "show trace before each executed TB (lots of logs)" },
    { CPU_LOG_TB_CPU, "cpu",
      "show CPU state before block translation" },
#ifdef TARGET_I386
    { CPU_LOG_PCALL, "pcall",
      "show protected mode far calls/returns/exceptions" },
    { CPU_LOG_RESET, "cpu_reset",
      "show CPU state before CPU resets" },
#endif
#ifdef DEBUG_IOPORT
    { CPU_LOG_IOPORT, "ioport",
      "show all i/o ports accesses" },
#endif
    { 0, NULL, NULL },
};
1612f193c797Sbellard 
/* Return 1 if the first n bytes of s1 equal the NUL-terminated string s2,
   0 otherwise.  Helper for parsing comma-separated log mask names. */
static int cmp1(const char *s1, int n, const char *s2)
{
    /* a negative length can never match; rejecting it explicitly also
       avoids the implicit signed/unsigned conversion against strlen() */
    if (n < 0)
        return 0;
    if (strlen(s2) != (size_t)n)
        return 0;
    return memcmp(s1, s2, n) == 0;
}
1619f193c797Sbellard 
1620f193c797Sbellard /* takes a comma separated list of log masks. Return 0 if error. */
1621f193c797Sbellard int cpu_str_to_log_mask(const char *str)
1622f193c797Sbellard {
1623c7cd6a37Sblueswir1     const CPULogItem *item;
1624f193c797Sbellard     int mask;
1625f193c797Sbellard     const char *p, *p1;
1626f193c797Sbellard 
1627f193c797Sbellard     p = str;
1628f193c797Sbellard     mask = 0;
1629f193c797Sbellard     for(;;) {
1630f193c797Sbellard         p1 = strchr(p, ',');
1631f193c797Sbellard         if (!p1)
1632f193c797Sbellard             p1 = p + strlen(p);
16338e3a9fd2Sbellard 	if(cmp1(p,p1-p,"all")) {
16348e3a9fd2Sbellard 		for(item = cpu_log_items; item->mask != 0; item++) {
16358e3a9fd2Sbellard 			mask |= item->mask;
16368e3a9fd2Sbellard 		}
16378e3a9fd2Sbellard 	} else {
1638f193c797Sbellard         for(item = cpu_log_items; item->mask != 0; item++) {
1639f193c797Sbellard             if (cmp1(p, p1 - p, item->name))
1640f193c797Sbellard                 goto found;
1641f193c797Sbellard         }
1642f193c797Sbellard         return 0;
16438e3a9fd2Sbellard 	}
1644f193c797Sbellard     found:
1645f193c797Sbellard         mask |= item->mask;
1646f193c797Sbellard         if (*p1 != ',')
1647f193c797Sbellard             break;
1648f193c797Sbellard         p = p1 + 1;
1649f193c797Sbellard     }
1650f193c797Sbellard     return mask;
1651f193c797Sbellard }
1652ea041c0eSbellard 
/* Print a fatal error message to stderr (and the log file, if enabled),
   dump the CPU state, and abort().  Never returns. */
void cpu_abort(CPUState *env, const char *fmt, ...)
{
    va_list ap;
    va_list ap2;

    va_start(ap, fmt);
    /* the argument list is consumed twice (stderr and log), so copy it */
    va_copy(ap2, ap);
    fprintf(stderr, "qemu: fatal: ");
    vfprintf(stderr, fmt, ap);
    fprintf(stderr, "\n");
#ifdef TARGET_I386
    cpu_dump_state(env, stderr, fprintf, X86_DUMP_FPU | X86_DUMP_CCOP);
#else
    cpu_dump_state(env, stderr, fprintf, 0);
#endif
    if (qemu_log_enabled()) {
        qemu_log("qemu: fatal: ");
        qemu_log_vprintf(fmt, ap2);
        qemu_log("\n");
#ifdef TARGET_I386
        log_cpu_state(env, X86_DUMP_FPU | X86_DUMP_CCOP);
#else
        log_cpu_state(env, 0);
#endif
        /* make sure everything reaches the file before aborting */
        qemu_log_flush();
        qemu_log_close();
    }
    va_end(ap2);
    va_end(ap);
    abort();
}
16847501267eSbellard 
1685c5be9f08Sths CPUState *cpu_copy(CPUState *env)
1686c5be9f08Sths {
168701ba9816Sths     CPUState *new_env = cpu_init(env->cpu_model_str);
1688c5be9f08Sths     CPUState *next_cpu = new_env->next_cpu;
1689c5be9f08Sths     int cpu_index = new_env->cpu_index;
16905a38f081Saliguori #if defined(TARGET_HAS_ICE)
16915a38f081Saliguori     CPUBreakpoint *bp;
16925a38f081Saliguori     CPUWatchpoint *wp;
16935a38f081Saliguori #endif
16945a38f081Saliguori 
1695c5be9f08Sths     memcpy(new_env, env, sizeof(CPUState));
16965a38f081Saliguori 
16975a38f081Saliguori     /* Preserve chaining and index. */
1698c5be9f08Sths     new_env->next_cpu = next_cpu;
1699c5be9f08Sths     new_env->cpu_index = cpu_index;
17005a38f081Saliguori 
17015a38f081Saliguori     /* Clone all break/watchpoints.
17025a38f081Saliguori        Note: Once we support ptrace with hw-debug register access, make sure
17035a38f081Saliguori        BP_CPU break/watchpoints are handled correctly on clone. */
17045a38f081Saliguori     TAILQ_INIT(&env->breakpoints);
17055a38f081Saliguori     TAILQ_INIT(&env->watchpoints);
17065a38f081Saliguori #if defined(TARGET_HAS_ICE)
17075a38f081Saliguori     TAILQ_FOREACH(bp, &env->breakpoints, entry) {
17085a38f081Saliguori         cpu_breakpoint_insert(new_env, bp->pc, bp->flags, NULL);
17095a38f081Saliguori     }
17105a38f081Saliguori     TAILQ_FOREACH(wp, &env->watchpoints, entry) {
17115a38f081Saliguori         cpu_watchpoint_insert(new_env, wp->vaddr, (~wp->len_mask) + 1,
17125a38f081Saliguori                               wp->flags, NULL);
17135a38f081Saliguori     }
17145a38f081Saliguori #endif
17155a38f081Saliguori 
1716c5be9f08Sths     return new_env;
1717c5be9f08Sths }
1718c5be9f08Sths 
17190124311eSbellard #if !defined(CONFIG_USER_ONLY)
17200124311eSbellard 
17215c751e99Sedgar_igl static inline void tlb_flush_jmp_cache(CPUState *env, target_ulong addr)
17225c751e99Sedgar_igl {
17235c751e99Sedgar_igl     unsigned int i;
17245c751e99Sedgar_igl 
17255c751e99Sedgar_igl     /* Discard jump cache entries for any tb which might potentially
17265c751e99Sedgar_igl        overlap the flushed page.  */
17275c751e99Sedgar_igl     i = tb_jmp_cache_hash_page(addr - TARGET_PAGE_SIZE);
17285c751e99Sedgar_igl     memset (&env->tb_jmp_cache[i], 0,
17295c751e99Sedgar_igl 	    TB_JMP_PAGE_SIZE * sizeof(TranslationBlock *));
17305c751e99Sedgar_igl 
17315c751e99Sedgar_igl     i = tb_jmp_cache_hash_page(addr);
17325c751e99Sedgar_igl     memset (&env->tb_jmp_cache[i], 0,
17335c751e99Sedgar_igl 	    TB_JMP_PAGE_SIZE * sizeof(TranslationBlock *));
17345c751e99Sedgar_igl }
17355c751e99Sedgar_igl 
/* TLB entry pattern with every field set to -1: no valid access can
   match it, so assigning this pattern invalidates an entry. */
static CPUTLBEntry s_cputlb_empty_entry = {
    .addr_read  = -1,
    .addr_write = -1,
    .addr_code  = -1,
    .addend     = -1,
};
174208738984SIgor Kovalenko 
1743ee8b7021Sbellard /* NOTE: if flush_global is true, also flush global entries (not
1744ee8b7021Sbellard    implemented yet) */
1745ee8b7021Sbellard void tlb_flush(CPUState *env, int flush_global)
174633417e70Sbellard {
174733417e70Sbellard     int i;
17480124311eSbellard 
17499fa3e853Sbellard #if defined(DEBUG_TLB)
17509fa3e853Sbellard     printf("tlb_flush:\n");
17519fa3e853Sbellard #endif
17520124311eSbellard     /* must reset current TB so that interrupts cannot modify the
17530124311eSbellard        links while we are modifying them */
17540124311eSbellard     env->current_tb = NULL;
17550124311eSbellard 
175633417e70Sbellard     for(i = 0; i < CPU_TLB_SIZE; i++) {
1757cfde4bd9SIsaku Yamahata         int mmu_idx;
1758cfde4bd9SIsaku Yamahata         for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) {
175908738984SIgor Kovalenko             env->tlb_table[mmu_idx][i] = s_cputlb_empty_entry;
1760cfde4bd9SIsaku Yamahata         }
176133417e70Sbellard     }
17629fa3e853Sbellard 
17638a40a180Sbellard     memset (env->tb_jmp_cache, 0, TB_JMP_CACHE_SIZE * sizeof (void *));
17649fa3e853Sbellard 
1765e3db7226Sbellard     tlb_flush_count++;
176633417e70Sbellard }
176733417e70Sbellard 
1768274da6b2Sbellard static inline void tlb_flush_entry(CPUTLBEntry *tlb_entry, target_ulong addr)
176961382a50Sbellard {
177084b7b8e7Sbellard     if (addr == (tlb_entry->addr_read &
177184b7b8e7Sbellard                  (TARGET_PAGE_MASK | TLB_INVALID_MASK)) ||
177284b7b8e7Sbellard         addr == (tlb_entry->addr_write &
177384b7b8e7Sbellard                  (TARGET_PAGE_MASK | TLB_INVALID_MASK)) ||
177484b7b8e7Sbellard         addr == (tlb_entry->addr_code &
177584b7b8e7Sbellard                  (TARGET_PAGE_MASK | TLB_INVALID_MASK))) {
177608738984SIgor Kovalenko         *tlb_entry = s_cputlb_empty_entry;
177784b7b8e7Sbellard     }
177861382a50Sbellard }
177961382a50Sbellard 
17802e12669aSbellard void tlb_flush_page(CPUState *env, target_ulong addr)
178133417e70Sbellard {
17828a40a180Sbellard     int i;
1783cfde4bd9SIsaku Yamahata     int mmu_idx;
17840124311eSbellard 
17859fa3e853Sbellard #if defined(DEBUG_TLB)
1786108c49b8Sbellard     printf("tlb_flush_page: " TARGET_FMT_lx "\n", addr);
17879fa3e853Sbellard #endif
17880124311eSbellard     /* must reset current TB so that interrupts cannot modify the
17890124311eSbellard        links while we are modifying them */
17900124311eSbellard     env->current_tb = NULL;
179133417e70Sbellard 
179261382a50Sbellard     addr &= TARGET_PAGE_MASK;
179333417e70Sbellard     i = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
1794cfde4bd9SIsaku Yamahata     for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++)
1795cfde4bd9SIsaku Yamahata         tlb_flush_entry(&env->tlb_table[mmu_idx][i], addr);
17960124311eSbellard 
17975c751e99Sedgar_igl     tlb_flush_jmp_cache(env, addr);
17989fa3e853Sbellard }
17999fa3e853Sbellard 
/* update the TLBs so that writes to code in the virtual page 'addr'
   can be detected */
static void tlb_protect_code(ram_addr_t ram_addr)
{
    /* clearing CODE_DIRTY_FLAG makes subsequent writes to the page go
       through the slow path, where self-modifying code is detected */
    cpu_physical_memory_reset_dirty(ram_addr,
                                    ram_addr + TARGET_PAGE_SIZE,
                                    CODE_DIRTY_FLAG);
}
18089fa3e853Sbellard 
/* update the TLB so that writes in physical page 'phys_addr' are no longer
   tested for self modifying code */
static void tlb_unprotect_code_phys(CPUState *env, ram_addr_t ram_addr,
                                    target_ulong vaddr)
{
    /* env and vaddr are currently unused; only the dirty bitmap is set */
    phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] |= CODE_DIRTY_FLAG;
}
18169fa3e853Sbellard 
/* If this TLB entry is a RAM write entry whose host address falls in
   [start, start + length), mark it TLB_NOTDIRTY so the next write goes
   through the slow path and sets the dirty bits again. */
static inline void tlb_reset_dirty_range(CPUTLBEntry *tlb_entry,
                                         unsigned long start, unsigned long length)
{
    unsigned long addr;
    /* only plain RAM entries participate in dirty tracking */
    if ((tlb_entry->addr_write & ~TARGET_PAGE_MASK) == IO_MEM_RAM) {
        addr = (tlb_entry->addr_write & TARGET_PAGE_MASK) + tlb_entry->addend;
        /* unsigned wrap-around turns the range test into one compare:
           equivalent to start <= addr < start + length */
        if ((addr - start) < length) {
            tlb_entry->addr_write = (tlb_entry->addr_write & TARGET_PAGE_MASK) | TLB_NOTDIRTY;
        }
    }
}
18281ccde1cbSbellard 
/* Clear the given dirty flags for the ram range [start, end) and retag
   the affected TLB entries of every CPU so the next write re-dirties
   the pages.  Note: start and end must be within the same ram block. */
void cpu_physical_memory_reset_dirty(ram_addr_t start, ram_addr_t end,
                                     int dirty_flags)
{
    CPUState *env;
    unsigned long length, start1;
    int i, mask, len;
    uint8_t *p;

    start &= TARGET_PAGE_MASK;
    end = TARGET_PAGE_ALIGN(end);

    length = end - start;
    if (length == 0)
        return;
    /* clear the requested flag bits in the dirty bitmap */
    len = length >> TARGET_PAGE_BITS;
    mask = ~dirty_flags;
    p = phys_ram_dirty + (start >> TARGET_PAGE_BITS);
    for(i = 0; i < len; i++)
        p[i] &= mask;

    /* we modify the TLB cache so that the dirty bit will be set again
       when accessing the range */
    start1 = (unsigned long)qemu_get_ram_ptr(start);
    /* Check that we don't span multiple blocks - this breaks the
       address comparisons below.  */
    if ((unsigned long)qemu_get_ram_ptr(end - 1) - start1
            != (end - 1) - start) {
        abort();
    }

    for(env = first_cpu; env != NULL; env = env->next_cpu) {
        int mmu_idx;
        for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) {
            for(i = 0; i < CPU_TLB_SIZE; i++)
                tlb_reset_dirty_range(&env->tlb_table[mmu_idx][i],
                                      start1, length);
        }
    }
}
18691ccde1cbSbellard 
187074576198Saliguori int cpu_physical_memory_set_dirty_tracking(int enable)
187174576198Saliguori {
187274576198Saliguori     in_migration = enable;
1873b0a46a33SJan Kiszka     if (kvm_enabled()) {
1874b0a46a33SJan Kiszka         return kvm_set_migration_log(enable);
1875b0a46a33SJan Kiszka     }
187674576198Saliguori     return 0;
187774576198Saliguori }
187874576198Saliguori 
/* Return non-zero if dirty-memory tracking is currently enabled. */
int cpu_physical_memory_get_dirty_tracking(void)
{
    return in_migration;
}
188374576198Saliguori 
1884151f7749SJan Kiszka int cpu_physical_sync_dirty_bitmap(target_phys_addr_t start_addr,
1885151f7749SJan Kiszka                                    target_phys_addr_t end_addr)
18862bec46dcSaliguori {
1887151f7749SJan Kiszka     int ret = 0;
1888151f7749SJan Kiszka 
18892bec46dcSaliguori     if (kvm_enabled())
1890151f7749SJan Kiszka         ret = kvm_physical_sync_dirty_bitmap(start_addr, end_addr);
1891151f7749SJan Kiszka     return ret;
18922bec46dcSaliguori }
18932bec46dcSaliguori 
/* Re-tag a RAM write entry with TLB_NOTDIRTY when its page is already
   clean, so the next write traps and marks the page dirty again. */
static inline void tlb_update_dirty(CPUTLBEntry *tlb_entry)
{
    ram_addr_t ram_addr;
    void *p;

    if ((tlb_entry->addr_write & ~TARGET_PAGE_MASK) == IO_MEM_RAM) {
        /* recover the host pointer, then map it back to a ram offset */
        p = (void *)(unsigned long)((tlb_entry->addr_write & TARGET_PAGE_MASK)
            + tlb_entry->addend);
        ram_addr = qemu_ram_addr_from_host(p);
        if (!cpu_physical_memory_is_dirty(ram_addr)) {
            tlb_entry->addr_write |= TLB_NOTDIRTY;
        }
    }
}
19083a7d929eSbellard 
19093a7d929eSbellard /* update the TLB according to the current state of the dirty bits */
19103a7d929eSbellard void cpu_tlb_update_dirty(CPUState *env)
19113a7d929eSbellard {
19123a7d929eSbellard     int i;
1913cfde4bd9SIsaku Yamahata     int mmu_idx;
1914cfde4bd9SIsaku Yamahata     for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) {
19153a7d929eSbellard         for(i = 0; i < CPU_TLB_SIZE; i++)
1916cfde4bd9SIsaku Yamahata             tlb_update_dirty(&env->tlb_table[mmu_idx][i]);
1917cfde4bd9SIsaku Yamahata     }
19183a7d929eSbellard }
19193a7d929eSbellard 
/* Clear the TLB_NOTDIRTY flag of an entry if it maps exactly 'vaddr',
   restoring fast (non-trapping) writes to that page. */
static inline void tlb_set_dirty1(CPUTLBEntry *tlb_entry, target_ulong vaddr)
{
    if (tlb_entry->addr_write == (vaddr | TLB_NOTDIRTY))
        tlb_entry->addr_write = vaddr;
}
19251ccde1cbSbellard 
19260f459d16Spbrook /* update the TLB corresponding to virtual page vaddr
19270f459d16Spbrook    so that it is no longer dirty */
19280f459d16Spbrook static inline void tlb_set_dirty(CPUState *env, target_ulong vaddr)
19291ccde1cbSbellard {
19301ccde1cbSbellard     int i;
1931cfde4bd9SIsaku Yamahata     int mmu_idx;
19321ccde1cbSbellard 
19330f459d16Spbrook     vaddr &= TARGET_PAGE_MASK;
19341ccde1cbSbellard     i = (vaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
1935cfde4bd9SIsaku Yamahata     for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++)
1936cfde4bd9SIsaku Yamahata         tlb_set_dirty1(&env->tlb_table[mmu_idx][i], vaddr);
19371ccde1cbSbellard }
19381ccde1cbSbellard 
/* add a new TLB entry. At most one entry for a given virtual address
   is permitted. Return 0 if OK or 2 if the page could not be mapped
   (can only happen in non SOFTMMU mode for I/O pages or pages
   conflicting with the host address space). */
int tlb_set_page_exec(CPUState *env, target_ulong vaddr,
                      target_phys_addr_t paddr, int prot,
                      int mmu_idx, int is_softmmu)
{
    PhysPageDesc *p;
    unsigned long pd;
    unsigned int index;
    target_ulong address;
    target_ulong code_address;
    target_phys_addr_t addend;
    int ret;
    CPUTLBEntry *te;
    CPUWatchpoint *wp;
    target_phys_addr_t iotlb;

    /* Look up the physical page descriptor; pages with no descriptor
       are treated as unassigned so accesses trap to the unassigned
       handlers.  'pd' carries the phys offset plus IO flag bits. */
    p = phys_page_find(paddr >> TARGET_PAGE_BITS);
    if (!p) {
        pd = IO_MEM_UNASSIGNED;
    } else {
        pd = p->phys_offset;
    }
#if defined(DEBUG_TLB)
    /* NOTE(review): the (int) cast truncates paddr on targets with
       >32-bit physical addresses -- debug output only. */
    printf("tlb_set_page: vaddr=" TARGET_FMT_lx " paddr=0x%08x prot=%x idx=%d smmu=%d pd=0x%08lx\n",
           vaddr, (int)paddr, prot, mmu_idx, is_softmmu, pd);
#endif

    /* 'ret' is currently always 0 in this (softmmu) implementation;
       the "2" case from the header comment applies to other builds. */
    ret = 0;
    address = vaddr;
    if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM && !(pd & IO_MEM_ROMD)) {
        /* IO memory case (romd handled later) */
        address |= TLB_MMIO;
    }
    addend = (unsigned long)qemu_get_ram_ptr(pd & TARGET_PAGE_MASK);
    if ((pd & ~TARGET_PAGE_MASK) <= IO_MEM_ROM) {
        /* Normal RAM.  */
        iotlb = pd & TARGET_PAGE_MASK;
        if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_RAM)
            iotlb |= IO_MEM_NOTDIRTY;
        else
            iotlb |= IO_MEM_ROM;
    } else {
        /* IO handlers are currently passed a physical address.
           It would be nice to pass an offset from the base address
           of that region.  This would avoid having to special case RAM,
           and avoid full address decoding in every device.
           We can't use the high bits of pd for this because
           IO_MEM_ROMD uses these as a ram address.  */
        iotlb = (pd & ~TARGET_PAGE_MASK);
        if (p) {
            iotlb += p->region_offset;
        } else {
            iotlb += paddr;
        }
    }

    code_address = address;
    /* Make accesses to pages with watchpoints go via the
       watchpoint trap routines.  */
    TAILQ_FOREACH(wp, &env->watchpoints, entry) {
        if (vaddr == (wp->vaddr & TARGET_PAGE_MASK)) {
            iotlb = io_mem_watch + paddr;
            /* TODO: The memory case can be optimized by not trapping
               reads of pages with a write breakpoint.  */
            address |= TLB_MMIO;
        }
    }

    /* Fill the TLB slot for this vaddr; iotlb/addend are stored as
       deltas from vaddr so a hit can add them directly. */
    index = (vaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
    env->iotlb[mmu_idx][index] = iotlb - vaddr;
    te = &env->tlb_table[mmu_idx][index];
    te->addend = addend - vaddr;
    if (prot & PAGE_READ) {
        te->addr_read = address;
    } else {
        te->addr_read = -1;   /* -1 can never match a page-aligned vaddr */
    }

    if (prot & PAGE_EXEC) {
        te->addr_code = code_address;
    } else {
        te->addr_code = -1;
    }
    if (prot & PAGE_WRITE) {
        if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_ROM ||
            (pd & IO_MEM_ROMD)) {
            /* Write access calls the I/O callback.  */
            te->addr_write = address | TLB_MMIO;
        } else if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_RAM &&
                   !cpu_physical_memory_is_dirty(pd)) {
            /* Clean RAM page: trap the first write so it can be marked
               dirty (and translated code invalidated). */
            te->addr_write = address | TLB_NOTDIRTY;
        } else {
            te->addr_write = address;
        }
    } else {
        te->addr_write = -1;
    }
    return ret;
}
20419fa3e853Sbellard 
20420124311eSbellard #else
20430124311eSbellard 
/* User-mode build: there is no softmmu TLB, so flushing is a no-op. */
void tlb_flush(CPUState *env, int flush_global)
{
}
20470124311eSbellard 
/* User-mode build: no softmmu TLB, nothing to invalidate. */
void tlb_flush_page(CPUState *env, target_ulong addr)
{
}
20510124311eSbellard 
/* User-mode stub: guest memory is mapped directly in the host address
   space, so there is no TLB entry to install; always reports success. */
int tlb_set_page_exec(CPUState *env, target_ulong vaddr,
                      target_phys_addr_t paddr, int prot,
                      int mmu_idx, int is_softmmu)
{
    return 0;
}
205833417e70Sbellard 
/*
 * Walks guest process memory "regions" one by one
 * and calls callback function 'fn' for each region.
 *
 * A region is a maximal run of pages sharing identical protection
 * flags.  fn(priv, start, end, prot) is invoked with the region's
 * [start, end) bounds; a non-zero return stops the walk and is
 * propagated to the caller.  Returns 0 when the walk completes.
 *
 * NOTE(review): 'end = (i << (32 - L1_BITS)) | ...' assumes a 32-bit
 * guest virtual address space -- confirm before reusing on 64-bit
 * guests.
 */
int walk_memory_regions(void *priv,
    int (*fn)(void *, unsigned long, unsigned long, unsigned long))
{
    unsigned long start, end;
    PageDesc *p = NULL;
    int i, j, prot, prot1;
    int rc = 0;

    /* start == -1 means "not currently inside a region". */
    start = end = -1;
    prot = 0;

    /* The extra i == L1_SIZE pass (with p == NULL, hence prot1 == 0)
       flushes a region that extends to the very top of the space. */
    for (i = 0; i <= L1_SIZE; i++) {
        p = (i < L1_SIZE) ? l1_map[i] : NULL;
        for (j = 0; j < L2_SIZE; j++) {
            prot1 = (p == NULL) ? 0 : p[j].flags;
            /*
             * "region" is one continuous chunk of memory
             * that has same protection flags set.
             */
            if (prot1 != prot) {
                end = (i << (32 - L1_BITS)) | (j << TARGET_PAGE_BITS);
                if (start != -1) {
                    rc = (*fn)(priv, start, end, prot);
                    /* callback can stop iteration by returning != 0 */
                    if (rc != 0)
                        return (rc);
                }
                if (prot1 != 0)
                    start = end;
                else
                    start = -1;
                prot = prot1;
            }
            /* Unallocated L2 table: the whole 1st-level slot has
               prot 0; the j == 0 iteration above already handled any
               region boundary, so skip the rest. */
            if (p == NULL)
                break;
        }
    }
    return (rc);
}
2102edf8e2afSMika Westerberg 
2103edf8e2afSMika Westerberg static int dump_region(void *priv, unsigned long start,
2104edf8e2afSMika Westerberg     unsigned long end, unsigned long prot)
2105edf8e2afSMika Westerberg {
2106edf8e2afSMika Westerberg     FILE *f = (FILE *)priv;
2107edf8e2afSMika Westerberg 
2108edf8e2afSMika Westerberg     (void) fprintf(f, "%08lx-%08lx %08lx %c%c%c\n",
2109edf8e2afSMika Westerberg         start, end, end - start,
2110edf8e2afSMika Westerberg         ((prot & PAGE_READ) ? 'r' : '-'),
2111edf8e2afSMika Westerberg         ((prot & PAGE_WRITE) ? 'w' : '-'),
2112edf8e2afSMika Westerberg         ((prot & PAGE_EXEC) ? 'x' : '-'));
2113edf8e2afSMika Westerberg 
2114edf8e2afSMika Westerberg     return (0);
2115edf8e2afSMika Westerberg }
2116edf8e2afSMika Westerberg 
/* dump memory mappings */
void page_dump(FILE *f)
{
    /* Header line matching dump_region()'s column layout. */
    (void) fprintf(f, "%-8s %-8s %-8s %s\n",
            "start", "end", "size", "prot");
    walk_memory_regions(f, dump_region);
}
21249fa3e853Sbellard 
212553a5960aSpbrook int page_get_flags(target_ulong address)
21269fa3e853Sbellard {
21279fa3e853Sbellard     PageDesc *p;
21289fa3e853Sbellard 
21299fa3e853Sbellard     p = page_find(address >> TARGET_PAGE_BITS);
21309fa3e853Sbellard     if (!p)
21319fa3e853Sbellard         return 0;
21329fa3e853Sbellard     return p->flags;
21339fa3e853Sbellard }
21349fa3e853Sbellard 
/* modify the flags of a page and invalidate the code if
   necessary. The flag PAGE_WRITE_ORG is positioned automatically
   depending on PAGE_WRITE */
void page_set_flags(target_ulong start, target_ulong end, int flags)
{
    PageDesc *p;
    target_ulong addr;

    /* mmap_lock should already be held.  */
    start = start & TARGET_PAGE_MASK;
    end = TARGET_PAGE_ALIGN(end);
    if (flags & PAGE_WRITE)
        flags |= PAGE_WRITE_ORG;
    for(addr = start; addr < end; addr += TARGET_PAGE_SIZE) {
        p = page_find_alloc(addr >> TARGET_PAGE_BITS);
        /* We may be called for host regions that are outside guest
           address space.  */
        if (!p)
            return;
        /* if the write protection is set, then we invalidate the code
           inside */
        /* A read-only page that becomes writable may still hold
           translated blocks (first_tb != NULL); drop them before the
           guest can modify the code. */
        if (!(p->flags & PAGE_WRITE) &&
            (flags & PAGE_WRITE) &&
            p->first_tb) {
            tb_invalidate_phys_page(addr, 0, NULL);
        }
        /* Flags are replaced wholesale, not merged. */
        p->flags = flags;
    }
}
21649fa3e853Sbellard 
21653d97b40bSths int page_check_range(target_ulong start, target_ulong len, int flags)
21663d97b40bSths {
21673d97b40bSths     PageDesc *p;
21683d97b40bSths     target_ulong end;
21693d97b40bSths     target_ulong addr;
21703d97b40bSths 
217155f280c9Sbalrog     if (start + len < start)
217255f280c9Sbalrog         /* we've wrapped around */
217355f280c9Sbalrog         return -1;
217455f280c9Sbalrog 
21753d97b40bSths     end = TARGET_PAGE_ALIGN(start+len); /* must do before we loose bits in the next step */
21763d97b40bSths     start = start & TARGET_PAGE_MASK;
21773d97b40bSths 
21783d97b40bSths     for(addr = start; addr < end; addr += TARGET_PAGE_SIZE) {
21793d97b40bSths         p = page_find(addr >> TARGET_PAGE_BITS);
21803d97b40bSths         if( !p )
21813d97b40bSths             return -1;
21823d97b40bSths         if( !(p->flags & PAGE_VALID) )
21833d97b40bSths             return -1;
21843d97b40bSths 
2185dae3270cSbellard         if ((flags & PAGE_READ) && !(p->flags & PAGE_READ))
21863d97b40bSths             return -1;
2187dae3270cSbellard         if (flags & PAGE_WRITE) {
2188dae3270cSbellard             if (!(p->flags & PAGE_WRITE_ORG))
21893d97b40bSths                 return -1;
2190dae3270cSbellard             /* unprotect the page if it was put read-only because it
2191dae3270cSbellard                contains translated code */
2192dae3270cSbellard             if (!(p->flags & PAGE_WRITE)) {
2193dae3270cSbellard                 if (!page_unprotect(addr, 0, NULL))
2194dae3270cSbellard                     return -1;
2195dae3270cSbellard             }
2196dae3270cSbellard             return 0;
2197dae3270cSbellard         }
21983d97b40bSths     }
21993d97b40bSths     return 0;
22003d97b40bSths }
22013d97b40bSths 
/* called from signal handler: invalidate the code and unprotect the
   page. Return TRUE if the fault was successfully handled. */
int page_unprotect(target_ulong address, unsigned long pc, void *puc)
{
    unsigned int page_index, prot, pindex;
    PageDesc *p, *p1;
    target_ulong host_start, host_end, addr;

    /* Technically this isn't safe inside a signal handler.  However we
       know this only ever happens in a synchronous SEGV handler, so in
       practice it seems to be ok.  */
    mmap_lock();

    /* Work on whole host pages: one host page may span several target
       pages, and mprotect() operates at host-page granularity. */
    host_start = address & qemu_host_page_mask;
    page_index = host_start >> TARGET_PAGE_BITS;
    p1 = page_find(page_index);
    if (!p1) {
        mmap_unlock();
        return 0;
    }
    host_end = host_start + qemu_host_page_size;
    p = p1;
    prot = 0;
    /* OR together the flags of every target page in the host page.
       NOTE(review): 'p++' assumes these PageDescs are contiguous in one
       L2 array, i.e. the host page does not straddle an L2 boundary --
       confirm against the page-table layout. */
    for(addr = host_start;addr < host_end; addr += TARGET_PAGE_SIZE) {
        prot |= p->flags;
        p++;
    }
    /* if the page was really writable, then we change its
       protection back to writable */
    if (prot & PAGE_WRITE_ORG) {
        pindex = (address - host_start) >> TARGET_PAGE_BITS;
        if (!(p1[pindex].flags & PAGE_WRITE)) {
            mprotect((void *)g2h(host_start), qemu_host_page_size,
                     (prot & PAGE_BITS) | PAGE_WRITE);
            p1[pindex].flags |= PAGE_WRITE;
            /* and since the content will be modified, we must invalidate
               the corresponding translated code. */
            tb_invalidate_phys_page(address, pc, puc);
#ifdef DEBUG_TB_CHECK
            tb_invalidate_check(address);
#endif
            mmap_unlock();
            return 1;
        }
    }
    /* Not a write-protection fault we can fix (page was never
       writable, or is already writable again). */
    mmap_unlock();
    return 0;
}
22509fa3e853Sbellard 
/* User-mode stub: there is no softmmu TLB, so nothing to update. */
static inline void tlb_set_dirty(CPUState *env,
                                 unsigned long addr, target_ulong vaddr)
{
}
22559fa3e853Sbellard #endif /* defined(CONFIG_USER_ONLY) */
225633417e70Sbellard 
2257e2eef170Spbrook #if !defined(CONFIG_USER_ONLY)
22588da3ff18Spbrook 
static int subpage_register (subpage_t *mmio, uint32_t start, uint32_t end,
                             ram_addr_t memory, ram_addr_t region_offset);
static void *subpage_init (target_phys_addr_t base, ram_addr_t *phys,
                           ram_addr_t orig_memory, ram_addr_t region_offset);
/* Intersect the registration range [start_addr, start_addr + orig_size)
   with the page containing 'addr'.  start_addr2/end_addr2 receive the
   in-page offsets of that intersection; need_subpage is set to 1 when
   the intersection does not cover the whole page (so a finer-grained
   subpage mapping is required).
   NOTE(review): the macro body reads 'orig_size' from the caller's
   scope, and the 'end_addr' parameter is never used -- confirm callers
   define orig_size. */
#define CHECK_SUBPAGE(addr, start_addr, start_addr2, end_addr, end_addr2, \
                      need_subpage)                                     \
    do {                                                                \
        if (addr > start_addr)                                          \
            start_addr2 = 0;                                            \
        else {                                                          \
            start_addr2 = start_addr & ~TARGET_PAGE_MASK;               \
            if (start_addr2 > 0)                                        \
                need_subpage = 1;                                       \
        }                                                               \
                                                                        \
        if ((start_addr + orig_size) - addr >= TARGET_PAGE_SIZE)        \
            end_addr2 = TARGET_PAGE_SIZE - 1;                           \
        else {                                                          \
            end_addr2 = (start_addr + orig_size - 1) & ~TARGET_PAGE_MASK; \
            if (end_addr2 < TARGET_PAGE_SIZE - 1)                       \
                need_subpage = 1;                                       \
        }                                                               \
    } while (0)
2282db7b5426Sblueswir1 
/* register physical memory. 'size' must be a multiple of the target
   page size. If (phys_offset & ~TARGET_PAGE_MASK) != 0, then it is an
   io memory page.  The address used when calling the IO function is
   the offset from the start of the region, plus region_offset.  Both
   start_addr and region_offset are rounded down to a page boundary
   before calculating this offset.  This should not be a problem unless
   the low bits of start_addr and region_offset differ.  */
void cpu_register_physical_memory_offset(target_phys_addr_t start_addr,
                                         ram_addr_t size,
                                         ram_addr_t phys_offset,
                                         ram_addr_t region_offset)
{
    target_phys_addr_t addr, end_addr;
    PhysPageDesc *p;
    CPUState *env;
    ram_addr_t orig_size = size;   /* read by the CHECK_SUBPAGE macro */
    void *subpage;

    if (kvm_enabled())
        kvm_set_phys_mem(start_addr, size, phys_offset);

    /* Unassigned pages keep their own address as region offset so the
       unassigned handlers see the absolute address. */
    if (phys_offset == IO_MEM_UNASSIGNED) {
        region_offset = start_addr;
    }
    region_offset &= TARGET_PAGE_MASK;
    size = (size + TARGET_PAGE_SIZE - 1) & TARGET_PAGE_MASK;
    end_addr = start_addr + (target_phys_addr_t)size;
    for(addr = start_addr; addr != end_addr; addr += TARGET_PAGE_SIZE) {
        p = phys_page_find(addr >> TARGET_PAGE_BITS);
        if (p && p->phys_offset != IO_MEM_UNASSIGNED) {
            /* Page already mapped: may need to split it into a subpage
               if the new registration only covers part of it or the
               handler is narrower than a page (IO_MEM_SUBWIDTH). */
            ram_addr_t orig_memory = p->phys_offset;
            target_phys_addr_t start_addr2, end_addr2;
            int need_subpage = 0;

            CHECK_SUBPAGE(addr, start_addr, start_addr2, end_addr, end_addr2,
                          need_subpage);
            if (need_subpage || phys_offset & IO_MEM_SUBWIDTH) {
                if (!(orig_memory & IO_MEM_SUBPAGE)) {
                    subpage = subpage_init((addr & TARGET_PAGE_MASK),
                                           &p->phys_offset, orig_memory,
                                           p->region_offset);
                } else {
                    /* Already a subpage: reuse its container. */
                    subpage = io_mem_opaque[(orig_memory & ~TARGET_PAGE_MASK)
                                            >> IO_MEM_SHIFT];
                }
                subpage_register(subpage, start_addr2, end_addr2, phys_offset,
                                 region_offset);
                p->region_offset = 0;
            } else {
                p->phys_offset = phys_offset;
                /* RAM/ROM pages advance the backing offset page by
                   page; IO pages all share one handler offset. */
                if ((phys_offset & ~TARGET_PAGE_MASK) <= IO_MEM_ROM ||
                    (phys_offset & IO_MEM_ROMD))
                    phys_offset += TARGET_PAGE_SIZE;
            }
        } else {
            /* Previously unmapped page: allocate a descriptor. */
            p = phys_page_find_alloc(addr >> TARGET_PAGE_BITS, 1);
            p->phys_offset = phys_offset;
            p->region_offset = region_offset;
            if ((phys_offset & ~TARGET_PAGE_MASK) <= IO_MEM_ROM ||
                (phys_offset & IO_MEM_ROMD)) {
                phys_offset += TARGET_PAGE_SIZE;
            } else {
                target_phys_addr_t start_addr2, end_addr2;
                int need_subpage = 0;

                CHECK_SUBPAGE(addr, start_addr, start_addr2, end_addr,
                              end_addr2, need_subpage);

                if (need_subpage || phys_offset & IO_MEM_SUBWIDTH) {
                    subpage = subpage_init((addr & TARGET_PAGE_MASK),
                                           &p->phys_offset, IO_MEM_UNASSIGNED,
                                           addr & TARGET_PAGE_MASK);
                    subpage_register(subpage, start_addr2, end_addr2,
                                     phys_offset, region_offset);
                    p->region_offset = 0;
                }
            }
        }
        region_offset += TARGET_PAGE_SIZE;
    }

    /* since each CPU stores ram addresses in its TLB cache, we must
       reset the modified entries */
    /* XXX: slow ! */
    for(env = first_cpu; env != NULL; env = env->next_cpu) {
        tlb_flush(env, 1);
    }
}
237133417e70Sbellard 
2372ba863458Sbellard /* XXX: temporary until new memory mapping API */
237300f82b8aSaurel32 ram_addr_t cpu_get_physical_page_desc(target_phys_addr_t addr)
2374ba863458Sbellard {
2375ba863458Sbellard     PhysPageDesc *p;
2376ba863458Sbellard 
2377ba863458Sbellard     p = phys_page_find(addr >> TARGET_PAGE_BITS);
2378ba863458Sbellard     if (!p)
2379ba863458Sbellard         return IO_MEM_UNASSIGNED;
2380ba863458Sbellard     return p->phys_offset;
2381ba863458Sbellard }
2382ba863458Sbellard 
2383f65ed4c1Saliguori void qemu_register_coalesced_mmio(target_phys_addr_t addr, ram_addr_t size)
2384f65ed4c1Saliguori {
2385f65ed4c1Saliguori     if (kvm_enabled())
2386f65ed4c1Saliguori         kvm_coalesce_mmio_region(addr, size);
2387f65ed4c1Saliguori }
2388f65ed4c1Saliguori 
2389f65ed4c1Saliguori void qemu_unregister_coalesced_mmio(target_phys_addr_t addr, ram_addr_t size)
2390f65ed4c1Saliguori {
2391f65ed4c1Saliguori     if (kvm_enabled())
2392f65ed4c1Saliguori         kvm_uncoalesce_mmio_region(addr, size);
2393f65ed4c1Saliguori }
2394f65ed4c1Saliguori 
/* Allocate 'size' bytes of guest RAM (page-aligned) and return its ram
   offset.  The block is prepended to the global ram_blocks list and the
   dirty bitmap is grown to cover it.
   NOTE(review): qemu_malloc/qemu_vmalloc presumably abort on failure,
   so no NULL checks here -- confirm against their definitions. */
ram_addr_t qemu_ram_alloc(ram_addr_t size)
{
    RAMBlock *new_block;

    size = TARGET_PAGE_ALIGN(size);
    new_block = qemu_malloc(sizeof(*new_block));

    new_block->host = qemu_vmalloc(size);
    new_block->offset = last_ram_offset;
    new_block->length = size;

    new_block->next = ram_blocks;
    ram_blocks = new_block;

    /* One dirty byte per target page; 0xff marks all pages dirty so
       the first sync sees them as modified. */
    phys_ram_dirty = qemu_realloc(phys_ram_dirty,
        (last_ram_offset + size) >> TARGET_PAGE_BITS);
    memset(phys_ram_dirty + (last_ram_offset >> TARGET_PAGE_BITS),
           0xff, size >> TARGET_PAGE_BITS);

    last_ram_offset += size;

    if (kvm_enabled())
        kvm_setup_guest_memory(new_block->host, size);

    return new_block->offset;
}
2421e9a1ab19Sbellard 
/* Release guest RAM obtained from qemu_ram_alloc().  Currently a
   no-op: blocks are never reclaimed and offsets are never reused. */
void qemu_ram_free(ram_addr_t addr)
{
    /* TODO: implement this.  */
}
2426e9a1ab19Sbellard 
2427dc828ca1Spbrook /* Return a host pointer to ram allocated with qemu_ram_alloc.
24285579c7f3Spbrook    With the exception of the softmmu code in this file, this should
24295579c7f3Spbrook    only be used for local memory (e.g. video ram) that the device owns,
24305579c7f3Spbrook    and knows it isn't going to access beyond the end of the block.
24315579c7f3Spbrook 
24325579c7f3Spbrook    It should not be used for general purpose DMA.
24335579c7f3Spbrook    Use cpu_physical_memory_map/cpu_physical_memory_rw instead.
24345579c7f3Spbrook  */
2435dc828ca1Spbrook void *qemu_get_ram_ptr(ram_addr_t addr)
2436dc828ca1Spbrook {
243794a6b54fSpbrook     RAMBlock *prev;
243894a6b54fSpbrook     RAMBlock **prevp;
243994a6b54fSpbrook     RAMBlock *block;
244094a6b54fSpbrook 
244194a6b54fSpbrook     prev = NULL;
244294a6b54fSpbrook     prevp = &ram_blocks;
244394a6b54fSpbrook     block = ram_blocks;
244494a6b54fSpbrook     while (block && (block->offset > addr
244594a6b54fSpbrook                      || block->offset + block->length <= addr)) {
244694a6b54fSpbrook         if (prev)
244794a6b54fSpbrook           prevp = &prev->next;
244894a6b54fSpbrook         prev = block;
244994a6b54fSpbrook         block = block->next;
245094a6b54fSpbrook     }
245194a6b54fSpbrook     if (!block) {
245294a6b54fSpbrook         fprintf(stderr, "Bad ram offset %" PRIx64 "\n", (uint64_t)addr);
245394a6b54fSpbrook         abort();
245494a6b54fSpbrook     }
245594a6b54fSpbrook     /* Move this entry to to start of the list.  */
245694a6b54fSpbrook     if (prev) {
245794a6b54fSpbrook         prev->next = block->next;
245894a6b54fSpbrook         block->next = *prevp;
245994a6b54fSpbrook         *prevp = block;
246094a6b54fSpbrook     }
246194a6b54fSpbrook     return block->host + (addr - block->offset);
2462dc828ca1Spbrook }
2463dc828ca1Spbrook 
24645579c7f3Spbrook /* Some of the softmmu routines need to translate from a host pointer
24655579c7f3Spbrook    (typically a TLB entry) back to a ram offset.  */
24665579c7f3Spbrook ram_addr_t qemu_ram_addr_from_host(void *ptr)
24675579c7f3Spbrook {
246894a6b54fSpbrook     RAMBlock *prev;
246994a6b54fSpbrook     RAMBlock **prevp;
247094a6b54fSpbrook     RAMBlock *block;
247194a6b54fSpbrook     uint8_t *host = ptr;
247294a6b54fSpbrook 
247394a6b54fSpbrook     prev = NULL;
247494a6b54fSpbrook     prevp = &ram_blocks;
247594a6b54fSpbrook     block = ram_blocks;
247694a6b54fSpbrook     while (block && (block->host > host
247794a6b54fSpbrook                      || block->host + block->length <= host)) {
247894a6b54fSpbrook         if (prev)
247994a6b54fSpbrook           prevp = &prev->next;
248094a6b54fSpbrook         prev = block;
248194a6b54fSpbrook         block = block->next;
248294a6b54fSpbrook     }
248394a6b54fSpbrook     if (!block) {
248494a6b54fSpbrook         fprintf(stderr, "Bad ram pointer %p\n", ptr);
248594a6b54fSpbrook         abort();
248694a6b54fSpbrook     }
248794a6b54fSpbrook     return block->offset + (host - block->host);
24885579c7f3Spbrook }
24895579c7f3Spbrook 
/* Byte load from unassigned physical memory: optionally logged, raises
   an access fault on SPARC targets, otherwise reads as zero. */
static uint32_t unassigned_mem_readb(void *opaque, target_phys_addr_t addr)
{
#ifdef DEBUG_UNASSIGNED
    printf("Unassigned mem read " TARGET_FMT_plx "\n", addr);
#endif
#if defined(TARGET_SPARC)
    do_unassigned_access(addr, 0, 0, 0, 1);  /* read, size 1 */
#endif
    return 0;
}
2500e18231a3Sblueswir1 
/* 16-bit load from unassigned physical memory; see unassigned_mem_readb. */
static uint32_t unassigned_mem_readw(void *opaque, target_phys_addr_t addr)
{
#ifdef DEBUG_UNASSIGNED
    printf("Unassigned mem read " TARGET_FMT_plx "\n", addr);
#endif
#if defined(TARGET_SPARC)
    do_unassigned_access(addr, 0, 0, 0, 2);  /* read, size 2 */
#endif
    return 0;
}
2511e18231a3Sblueswir1 
/* 32-bit load from unassigned physical memory; see unassigned_mem_readb. */
static uint32_t unassigned_mem_readl(void *opaque, target_phys_addr_t addr)
{
#ifdef DEBUG_UNASSIGNED
    printf("Unassigned mem read " TARGET_FMT_plx "\n", addr);
#endif
#if defined(TARGET_SPARC)
    do_unassigned_access(addr, 0, 0, 0, 4);  /* read, size 4 */
#endif
    return 0;
}
252233417e70Sbellard 
/* Byte write to memory with no device mapped.  The value is silently
   discarded except on SPARC, where an unassigned-access trap is
   raised (is_write argument is 1 here vs 0 in the read handlers). */
static void unassigned_mem_writeb(void *opaque, target_phys_addr_t addr, uint32_t val)
{
#ifdef DEBUG_UNASSIGNED
    printf("Unassigned mem write " TARGET_FMT_plx " = 0x%x\n", addr, val);
#endif
#if defined(TARGET_SPARC)
    do_unassigned_access(addr, 1, 0, 0, 1);
#endif
}
2532e18231a3Sblueswir1 
/* 16-bit variant of the unassigned-memory write handler: value is
   discarded, SPARC gets an unassigned-access trap. */
static void unassigned_mem_writew(void *opaque, target_phys_addr_t addr, uint32_t val)
{
#ifdef DEBUG_UNASSIGNED
    printf("Unassigned mem write " TARGET_FMT_plx " = 0x%x\n", addr, val);
#endif
#if defined(TARGET_SPARC)
    do_unassigned_access(addr, 1, 0, 0, 2);
#endif
}
2542e18231a3Sblueswir1 
/* 32-bit variant of the unassigned-memory write handler: value is
   discarded, SPARC gets an unassigned-access trap. */
static void unassigned_mem_writel(void *opaque, target_phys_addr_t addr, uint32_t val)
{
#ifdef DEBUG_UNASSIGNED
    printf("Unassigned mem write " TARGET_FMT_plx " = 0x%x\n", addr, val);
#endif
#if defined(TARGET_SPARC)
    do_unassigned_access(addr, 1, 0, 0, 4);
#endif
}
255233417e70Sbellard 
/* Read dispatch table for the IO_MEM_UNASSIGNED slot: index 0 = byte,
   1 = word, 2 = long, matching the io_mem_read[][] convention. */
static CPUReadMemoryFunc * const unassigned_mem_read[3] = {
    unassigned_mem_readb,
    unassigned_mem_readw,
    unassigned_mem_readl,
};
255833417e70Sbellard 
/* Write dispatch table for the IO_MEM_UNASSIGNED slot (byte/word/long). */
static CPUWriteMemoryFunc * const unassigned_mem_write[3] = {
    unassigned_mem_writeb,
    unassigned_mem_writew,
    unassigned_mem_writel,
};
256433417e70Sbellard 
/* Byte store into a RAM page that still holds translated code
   (IO_MEM_NOTDIRTY).  If the page's code-dirty bit is clear, any TBs
   on the page are invalidated first; the store is then performed and
   the page marked dirty.  Note dirty_flags is re-read after the TB
   flush because invalidation can update the dirty bitmap. */
static void notdirty_mem_writeb(void *opaque, target_phys_addr_t ram_addr,
                                uint32_t val)
{
    int dirty_flags;
    dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
    if (!(dirty_flags & CODE_DIRTY_FLAG)) {
#if !defined(CONFIG_USER_ONLY)
        tb_invalidate_phys_page_fast(ram_addr, 1);
        dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
#endif
    }
    stb_p(qemu_get_ram_ptr(ram_addr), val);
    /* Mark every dirty bit except CODE_DIRTY_FLAG. */
    dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
    phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] = dirty_flags;
    /* we remove the notdirty callback only if the code has been
       flushed */
    if (dirty_flags == 0xff)
        tlb_set_dirty(cpu_single_env, cpu_single_env->mem_io_vaddr);
}
25841ccde1cbSbellard 
/* 16-bit variant of the notdirty write handler: invalidate TBs
   covering the 2-byte store if needed, store, mark the page dirty,
   and drop the notdirty TLB intercept once the code is flushed. */
static void notdirty_mem_writew(void *opaque, target_phys_addr_t ram_addr,
                                uint32_t val)
{
    int dirty_flags;
    dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
    if (!(dirty_flags & CODE_DIRTY_FLAG)) {
#if !defined(CONFIG_USER_ONLY)
        tb_invalidate_phys_page_fast(ram_addr, 2);
        dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
#endif
    }
    stw_p(qemu_get_ram_ptr(ram_addr), val);
    dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
    phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] = dirty_flags;
    /* we remove the notdirty callback only if the code has been
       flushed */
    if (dirty_flags == 0xff)
        tlb_set_dirty(cpu_single_env, cpu_single_env->mem_io_vaddr);
}
26041ccde1cbSbellard 
/* 32-bit variant of the notdirty write handler: invalidate TBs
   covering the 4-byte store if needed, store, mark the page dirty,
   and drop the notdirty TLB intercept once the code is flushed. */
static void notdirty_mem_writel(void *opaque, target_phys_addr_t ram_addr,
                                uint32_t val)
{
    int dirty_flags;
    dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
    if (!(dirty_flags & CODE_DIRTY_FLAG)) {
#if !defined(CONFIG_USER_ONLY)
        tb_invalidate_phys_page_fast(ram_addr, 4);
        dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
#endif
    }
    stl_p(qemu_get_ram_ptr(ram_addr), val);
    dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
    phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] = dirty_flags;
    /* we remove the notdirty callback only if the code has been
       flushed */
    if (dirty_flags == 0xff)
        tlb_set_dirty(cpu_single_env, cpu_single_env->mem_io_vaddr);
}
26241ccde1cbSbellard 
/* Placeholder read table for slots (IO_MEM_ROM, IO_MEM_NOTDIRTY) that
   only intercept the write side; the read entries must never be
   reached through the normal dispatch path. */
static CPUReadMemoryFunc * const error_mem_read[3] = {
    NULL, /* never used */
    NULL, /* never used */
    NULL, /* never used */
};
26303a7d929eSbellard 
/* Write dispatch table for the IO_MEM_NOTDIRTY slot (byte/word/long). */
static CPUWriteMemoryFunc * const notdirty_mem_write[3] = {
    notdirty_mem_writeb,
    notdirty_mem_writew,
    notdirty_mem_writel,
};
26361ccde1cbSbellard 
/* Generate a debug exception if a watchpoint has been hit.
 *
 * offset   - offset of the access within the current page (the guest
 *            virtual page comes from env->mem_io_vaddr)
 * len_mask - mask derived from the access size (~0x0/~0x1/~0x3 for
 *            byte/word/long in the callers below)
 * flags    - BP_MEM_READ or BP_MEM_WRITE, matched against each
 *            watchpoint's flags
 *
 * On a hit the current TB is invalidated and either EXCP_DEBUG is
 * raised (BP_STOP_BEFORE_ACCESS) or a single-instruction TB is
 * regenerated so the access completes before the debug exception;
 * cpu_resume_from_signal() then longjmps back, so this function does
 * not return on that path. */
static void check_watchpoint(int offset, int len_mask, int flags)
{
    CPUState *env = cpu_single_env;
    target_ulong pc, cs_base;
    TranslationBlock *tb;
    target_ulong vaddr;
    CPUWatchpoint *wp;
    int cpu_flags;

    if (env->watchpoint_hit) {
        /* We re-entered the check after replacing the TB. Now raise
         * the debug interrupt so that it will trigger after the
         * current instruction. */
        cpu_interrupt(env, CPU_INTERRUPT_DEBUG);
        return;
    }
    vaddr = (env->mem_io_vaddr & TARGET_PAGE_MASK) + offset;
    TAILQ_FOREACH(wp, &env->watchpoints, entry) {
        /* Hit if either endpoint test matches: the access covers the
           watchpoint address, or the watchpoint range covers the
           access address. */
        if ((vaddr == (wp->vaddr & len_mask) ||
             (vaddr & wp->len_mask) == wp->vaddr) && (wp->flags & flags)) {
            wp->flags |= BP_WATCHPOINT_HIT;
            if (!env->watchpoint_hit) {
                env->watchpoint_hit = wp;
                tb = tb_find_pc(env->mem_io_pc);
                if (!tb) {
                    cpu_abort(env, "check_watchpoint: could not find TB for "
                              "pc=%p", (void *)env->mem_io_pc);
                }
                cpu_restore_state(tb, env, env->mem_io_pc, NULL);
                tb_phys_invalidate(tb, -1);
                if (wp->flags & BP_STOP_BEFORE_ACCESS) {
                    env->exception_index = EXCP_DEBUG;
                } else {
                    /* Regenerate a TB containing just the faulting
                       instruction so the access can complete first. */
                    cpu_get_tb_cpu_state(env, &pc, &cs_base, &cpu_flags);
                    tb_gen_code(env, pc, cs_base, cpu_flags, 1);
                }
                cpu_resume_from_signal(env, NULL);
            }
        } else {
            wp->flags &= ~BP_WATCHPOINT_HIT;
        }
    }
}
26810f459d16Spbrook 
26826658ffb8Spbrook /* Watchpoint access routines.  Watchpoints are inserted using TLB tricks,
26836658ffb8Spbrook    so these check for a hit then pass through to the normal out-of-line
26846658ffb8Spbrook    phys routines.  */
26856658ffb8Spbrook static uint32_t watch_mem_readb(void *opaque, target_phys_addr_t addr)
26866658ffb8Spbrook {
2687b4051334Saliguori     check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x0, BP_MEM_READ);
26886658ffb8Spbrook     return ldub_phys(addr);
26896658ffb8Spbrook }
26906658ffb8Spbrook 
26916658ffb8Spbrook static uint32_t watch_mem_readw(void *opaque, target_phys_addr_t addr)
26926658ffb8Spbrook {
2693b4051334Saliguori     check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x1, BP_MEM_READ);
26946658ffb8Spbrook     return lduw_phys(addr);
26956658ffb8Spbrook }
26966658ffb8Spbrook 
26976658ffb8Spbrook static uint32_t watch_mem_readl(void *opaque, target_phys_addr_t addr)
26986658ffb8Spbrook {
2699b4051334Saliguori     check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x3, BP_MEM_READ);
27006658ffb8Spbrook     return ldl_phys(addr);
27016658ffb8Spbrook }
27026658ffb8Spbrook 
27036658ffb8Spbrook static void watch_mem_writeb(void *opaque, target_phys_addr_t addr,
27046658ffb8Spbrook                              uint32_t val)
27056658ffb8Spbrook {
2706b4051334Saliguori     check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x0, BP_MEM_WRITE);
27076658ffb8Spbrook     stb_phys(addr, val);
27086658ffb8Spbrook }
27096658ffb8Spbrook 
27106658ffb8Spbrook static void watch_mem_writew(void *opaque, target_phys_addr_t addr,
27116658ffb8Spbrook                              uint32_t val)
27126658ffb8Spbrook {
2713b4051334Saliguori     check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x1, BP_MEM_WRITE);
27146658ffb8Spbrook     stw_phys(addr, val);
27156658ffb8Spbrook }
27166658ffb8Spbrook 
27176658ffb8Spbrook static void watch_mem_writel(void *opaque, target_phys_addr_t addr,
27186658ffb8Spbrook                              uint32_t val)
27196658ffb8Spbrook {
2720b4051334Saliguori     check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x3, BP_MEM_WRITE);
27216658ffb8Spbrook     stl_phys(addr, val);
27226658ffb8Spbrook }
27236658ffb8Spbrook 
/* Read dispatch table for the watchpoint IO slot (byte/word/long). */
static CPUReadMemoryFunc * const watch_mem_read[3] = {
    watch_mem_readb,
    watch_mem_readw,
    watch_mem_readl,
};
27296658ffb8Spbrook 
/* Write dispatch table for the watchpoint IO slot (byte/word/long). */
static CPUWriteMemoryFunc * const watch_mem_write[3] = {
    watch_mem_writeb,
    watch_mem_writew,
    watch_mem_writel,
};
27356658ffb8Spbrook 
2736db7b5426Sblueswir1 static inline uint32_t subpage_readlen (subpage_t *mmio, target_phys_addr_t addr,
2737db7b5426Sblueswir1                                  unsigned int len)
2738db7b5426Sblueswir1 {
2739db7b5426Sblueswir1     uint32_t ret;
2740db7b5426Sblueswir1     unsigned int idx;
2741db7b5426Sblueswir1 
27428da3ff18Spbrook     idx = SUBPAGE_IDX(addr);
2743db7b5426Sblueswir1 #if defined(DEBUG_SUBPAGE)
2744db7b5426Sblueswir1     printf("%s: subpage %p len %d addr " TARGET_FMT_plx " idx %d\n", __func__,
2745db7b5426Sblueswir1            mmio, len, addr, idx);
2746db7b5426Sblueswir1 #endif
27478da3ff18Spbrook     ret = (**mmio->mem_read[idx][len])(mmio->opaque[idx][0][len],
27488da3ff18Spbrook                                        addr + mmio->region_offset[idx][0][len]);
2749db7b5426Sblueswir1 
2750db7b5426Sblueswir1     return ret;
2751db7b5426Sblueswir1 }
2752db7b5426Sblueswir1 
2753db7b5426Sblueswir1 static inline void subpage_writelen (subpage_t *mmio, target_phys_addr_t addr,
2754db7b5426Sblueswir1                               uint32_t value, unsigned int len)
2755db7b5426Sblueswir1 {
2756db7b5426Sblueswir1     unsigned int idx;
2757db7b5426Sblueswir1 
27588da3ff18Spbrook     idx = SUBPAGE_IDX(addr);
2759db7b5426Sblueswir1 #if defined(DEBUG_SUBPAGE)
2760db7b5426Sblueswir1     printf("%s: subpage %p len %d addr " TARGET_FMT_plx " idx %d value %08x\n", __func__,
2761db7b5426Sblueswir1            mmio, len, addr, idx, value);
2762db7b5426Sblueswir1 #endif
27638da3ff18Spbrook     (**mmio->mem_write[idx][len])(mmio->opaque[idx][1][len],
27648da3ff18Spbrook                                   addr + mmio->region_offset[idx][1][len],
27658da3ff18Spbrook                                   value);
2766db7b5426Sblueswir1 }
2767db7b5426Sblueswir1 
/* Byte-read trampoline registered with cpu_register_io_memory;
   opaque is the subpage_t for this page. */
static uint32_t subpage_readb (void *opaque, target_phys_addr_t addr)
{
#if defined(DEBUG_SUBPAGE)
    printf("%s: addr " TARGET_FMT_plx "\n", __func__, addr);
#endif

    return subpage_readlen(opaque, addr, 0);
}
2776db7b5426Sblueswir1 
/* Byte-write trampoline registered with cpu_register_io_memory;
   opaque is the subpage_t for this page. */
static void subpage_writeb (void *opaque, target_phys_addr_t addr,
                            uint32_t value)
{
#if defined(DEBUG_SUBPAGE)
    printf("%s: addr " TARGET_FMT_plx " val %08x\n", __func__, addr, value);
#endif
    subpage_writelen(opaque, addr, value, 0);
}
2785db7b5426Sblueswir1 
/* 16-bit read trampoline for subpage dispatch (width index 1). */
static uint32_t subpage_readw (void *opaque, target_phys_addr_t addr)
{
#if defined(DEBUG_SUBPAGE)
    printf("%s: addr " TARGET_FMT_plx "\n", __func__, addr);
#endif

    return subpage_readlen(opaque, addr, 1);
}
2794db7b5426Sblueswir1 
/* 16-bit write trampoline for subpage dispatch (width index 1). */
static void subpage_writew (void *opaque, target_phys_addr_t addr,
                            uint32_t value)
{
#if defined(DEBUG_SUBPAGE)
    printf("%s: addr " TARGET_FMT_plx " val %08x\n", __func__, addr, value);
#endif
    subpage_writelen(opaque, addr, value, 1);
}
2803db7b5426Sblueswir1 
/* 32-bit read trampoline for subpage dispatch (width index 2). */
static uint32_t subpage_readl (void *opaque, target_phys_addr_t addr)
{
#if defined(DEBUG_SUBPAGE)
    printf("%s: addr " TARGET_FMT_plx "\n", __func__, addr);
#endif

    return subpage_readlen(opaque, addr, 2);
}
2812db7b5426Sblueswir1 
/* 32-bit write trampoline for subpage dispatch (width index 2). */
static void subpage_writel (void *opaque,
                         target_phys_addr_t addr, uint32_t value)
{
#if defined(DEBUG_SUBPAGE)
    printf("%s: addr " TARGET_FMT_plx " val %08x\n", __func__, addr, value);
#endif
    subpage_writelen(opaque, addr, value, 2);
}
2821db7b5426Sblueswir1 
/* Read dispatch table handed to cpu_register_io_memory for subpages. */
static CPUReadMemoryFunc * const subpage_read[] = {
    &subpage_readb,
    &subpage_readw,
    &subpage_readl,
};
2827db7b5426Sblueswir1 
/* Write dispatch table handed to cpu_register_io_memory for subpages. */
static CPUWriteMemoryFunc * const subpage_write[] = {
    &subpage_writeb,
    &subpage_writew,
    &subpage_writel,
};
2833db7b5426Sblueswir1 
/* Point the subpage slots covering [start, end] (byte offsets within
 * the page) at the handlers of io-memory entry 'memory'.  Entries are
 * only overwritten where the source table has a non-NULL handler, so
 * repeated registrations can layer different devices onto one page.
 * region_offset is recorded per-slot and later added to the access
 * address by subpage_readlen/subpage_writelen.  Returns 0 on success,
 * -1 if the range does not fit in one page. */
static int subpage_register (subpage_t *mmio, uint32_t start, uint32_t end,
                             ram_addr_t memory, ram_addr_t region_offset)
{
    int idx, eidx;
    unsigned int i;

    if (start >= TARGET_PAGE_SIZE || end >= TARGET_PAGE_SIZE)
        return -1;
    idx = SUBPAGE_IDX(start);
    eidx = SUBPAGE_IDX(end);
#if defined(DEBUG_SUBPAGE)
    printf("%s: %p start %08x end %08x idx %08x eidx %08x mem %ld\n", __func__,
           mmio, start, end, idx, eidx, memory);
#endif
    /* Convert the encoded io-memory value into a table index. */
    memory >>= IO_MEM_SHIFT;
    for (; idx <= eidx; idx++) {
        for (i = 0; i < 4; i++) {
            if (io_mem_read[memory][i]) {
                mmio->mem_read[idx][i] = &io_mem_read[memory][i];
                mmio->opaque[idx][0][i] = io_mem_opaque[memory];
                mmio->region_offset[idx][0][i] = region_offset;
            }
            if (io_mem_write[memory][i]) {
                mmio->mem_write[idx][i] = &io_mem_write[memory][i];
                mmio->opaque[idx][1][i] = io_mem_opaque[memory];
                mmio->region_offset[idx][1][i] = region_offset;
            }
        }
    }

    return 0;
}
2866db7b5426Sblueswir1 
/* Allocate and register a new subpage container for the page at
 * 'base'.  The whole page is initially backed by 'orig_memory' (with
 * 'region_offset' applied); *phys is rewritten to the new io-memory
 * value tagged with IO_MEM_SUBPAGE so the TLB routes accesses here.
 * Returns the new subpage_t (ownership stays with the io-memory
 * table; there is no matching free path visible here). */
static void *subpage_init (target_phys_addr_t base, ram_addr_t *phys,
                           ram_addr_t orig_memory, ram_addr_t region_offset)
{
    subpage_t *mmio;
    int subpage_memory;

    mmio = qemu_mallocz(sizeof(subpage_t));

    mmio->base = base;
    subpage_memory = cpu_register_io_memory(subpage_read, subpage_write, mmio);
#if defined(DEBUG_SUBPAGE)
    printf("%s: %p base " TARGET_FMT_plx " len %08x %d\n", __func__,
           mmio, base, TARGET_PAGE_SIZE, subpage_memory);
#endif
    *phys = subpage_memory | IO_MEM_SUBPAGE;
    subpage_register(mmio, 0, TARGET_PAGE_SIZE - 1, orig_memory,
                         region_offset);

    return mmio;
}
2887db7b5426Sblueswir1 
288888715657Saliguori static int get_free_io_mem_idx(void)
288988715657Saliguori {
289088715657Saliguori     int i;
289188715657Saliguori 
289288715657Saliguori     for (i = 0; i<IO_MEM_NB_ENTRIES; i++)
289388715657Saliguori         if (!io_mem_used[i]) {
289488715657Saliguori             io_mem_used[i] = 1;
289588715657Saliguori             return i;
289688715657Saliguori         }
289788715657Saliguori 
289888715657Saliguori     return -1;
289988715657Saliguori }
290088715657Saliguori 
/* mem_read and mem_write are arrays of functions containing the
   function to access byte (index 0), word (index 1) and dword (index
   2). Functions can be omitted with a NULL function pointer.
   If io_index is non zero, the corresponding io zone is
   modified. If it is zero, a new io zone is allocated. The return
   value can be used with cpu_register_physical_memory(). (-1) is
   returned if error.  If any width handler is missing, the returned
   value is tagged with IO_MEM_SUBWIDTH. */
static int cpu_register_io_memory_fixed(int io_index,
                                        CPUReadMemoryFunc * const *mem_read,
                                        CPUWriteMemoryFunc * const *mem_write,
                                        void *opaque)
{
    int i, subwidth = 0;

    if (io_index <= 0) {
        /* Allocate a fresh slot. */
        io_index = get_free_io_mem_idx();
        if (io_index == -1)
            return io_index;
    } else {
        /* Caller passed an encoded io-memory value; decode the index. */
        io_index >>= IO_MEM_SHIFT;
        if (io_index >= IO_MEM_NB_ENTRIES)
            return -1;
    }

    for(i = 0;i < 3; i++) {
        if (!mem_read[i] || !mem_write[i])
            subwidth = IO_MEM_SUBWIDTH;
        io_mem_read[io_index][i] = mem_read[i];
        io_mem_write[io_index][i] = mem_write[i];
    }
    io_mem_opaque[io_index] = opaque;
    return (io_index << IO_MEM_SHIFT) | subwidth;
}
293461382a50Sbellard 
/* Public entry point: allocate a new io-memory zone (io_index 0 means
   "pick a free slot") with the given byte/word/long handlers. */
int cpu_register_io_memory(CPUReadMemoryFunc * const *mem_read,
                           CPUWriteMemoryFunc * const *mem_write,
                           void *opaque)
{
    return cpu_register_io_memory_fixed(0, mem_read, mem_write, opaque);
}
29411eed09cbSAvi Kivity 
294288715657Saliguori void cpu_unregister_io_memory(int io_table_address)
294388715657Saliguori {
294488715657Saliguori     int i;
294588715657Saliguori     int io_index = io_table_address >> IO_MEM_SHIFT;
294688715657Saliguori 
294788715657Saliguori     for (i=0;i < 3; i++) {
294888715657Saliguori         io_mem_read[io_index][i] = unassigned_mem_read[i];
294988715657Saliguori         io_mem_write[io_index][i] = unassigned_mem_write[i];
295088715657Saliguori     }
295188715657Saliguori     io_mem_opaque[io_index] = NULL;
295288715657Saliguori     io_mem_used[io_index] = 0;
295388715657Saliguori }
295488715657Saliguori 
/* One-time setup of the io-memory tables: install the fixed slots
   (ROM, unassigned, notdirty) and allocate the watchpoint slot. */
static void io_mem_init(void)
{
    int i;

    cpu_register_io_memory_fixed(IO_MEM_ROM, error_mem_read, unassigned_mem_write, NULL);
    cpu_register_io_memory_fixed(IO_MEM_UNASSIGNED, unassigned_mem_read, unassigned_mem_write, NULL);
    cpu_register_io_memory_fixed(IO_MEM_NOTDIRTY, error_mem_read, notdirty_mem_write, NULL);
    /* Reserve the first 5 entries -- presumably the fixed IO_MEM_*
       slots; confirm against the IO_MEM_* encoding in the headers. */
    for (i=0; i<5; i++)
        io_mem_used[i] = 1;

    io_mem_watch = cpu_register_io_memory(watch_mem_read,
                                          watch_mem_write, NULL);
}
2968e9179ce1SAvi Kivity 
2969e2eef170Spbrook #endif /* !defined(CONFIG_USER_ONLY) */
2970e2eef170Spbrook 
297113eb76e0Sbellard /* physical memory access (slow version, mainly for debug) */
297213eb76e0Sbellard #if defined(CONFIG_USER_ONLY)
/* User-mode variant: copy 'len' bytes between 'buf' and guest memory
 * at 'addr', page by page, honouring the page protection flags.
 * Silently stops (partial transfer) on an invalid/unwritable/
 * unreadable page or a failed lock_user -- callers get no error
 * indication. */
void cpu_physical_memory_rw(target_phys_addr_t addr, uint8_t *buf,
                            int len, int is_write)
{
    int l, flags;
    target_ulong page;
    void * p;

    while (len > 0) {
        page = addr & TARGET_PAGE_MASK;
        /* Clamp this iteration to the end of the current page. */
        l = (page + TARGET_PAGE_SIZE) - addr;
        if (l > len)
            l = len;
        flags = page_get_flags(page);
        if (!(flags & PAGE_VALID))
            return;
        if (is_write) {
            if (!(flags & PAGE_WRITE))
                return;
            /* XXX: this code should not depend on lock_user */
            if (!(p = lock_user(VERIFY_WRITE, addr, l, 0)))
                /* FIXME - should this return an error rather than just fail? */
                return;
            memcpy(p, buf, l);
            unlock_user(p, addr, l);
        } else {
            if (!(flags & PAGE_READ))
                return;
            /* XXX: this code should not depend on lock_user */
            if (!(p = lock_user(VERIFY_READ, addr, l, 1)))
                /* FIXME - should this return an error rather than just fail? */
                return;
            memcpy(buf, p, l);
            unlock_user(p, addr, 0);
        }
        len -= l;
        buf += l;
        addr += l;
    }
}
30128df1cd07Sbellard 
301313eb76e0Sbellard #else
/* System-mode variant: copy 'len' bytes between 'buf' and guest
 * physical memory at 'addr'.  RAM pages are memcpy'd directly (with
 * TB invalidation and dirty-bit update on writes); MMIO pages are
 * split into the widest naturally-aligned 4/2/1-byte device accesses
 * the remaining length allows. */
void cpu_physical_memory_rw(target_phys_addr_t addr, uint8_t *buf,
                            int len, int is_write)
{
    int l, io_index;
    uint8_t *ptr;
    uint32_t val;
    target_phys_addr_t page;
    unsigned long pd;
    PhysPageDesc *p;

    while (len > 0) {
        page = addr & TARGET_PAGE_MASK;
        /* Clamp this iteration to the end of the current page. */
        l = (page + TARGET_PAGE_SIZE) - addr;
        if (l > len)
            l = len;
        p = phys_page_find(page >> TARGET_PAGE_BITS);
        if (!p) {
            pd = IO_MEM_UNASSIGNED;
        } else {
            pd = p->phys_offset;
        }

        if (is_write) {
            if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
                /* MMIO write: dispatch through the io_mem tables. */
                target_phys_addr_t addr1 = addr;
                io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
                if (p)
                    addr1 = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
                /* XXX: could force cpu_single_env to NULL to avoid
                   potential bugs */
                if (l >= 4 && ((addr1 & 3) == 0)) {
                    /* 32 bit write access */
                    val = ldl_p(buf);
                    io_mem_write[io_index][2](io_mem_opaque[io_index], addr1, val);
                    l = 4;
                } else if (l >= 2 && ((addr1 & 1) == 0)) {
                    /* 16 bit write access */
                    val = lduw_p(buf);
                    io_mem_write[io_index][1](io_mem_opaque[io_index], addr1, val);
                    l = 2;
                } else {
                    /* 8 bit write access */
                    val = ldub_p(buf);
                    io_mem_write[io_index][0](io_mem_opaque[io_index], addr1, val);
                    l = 1;
                }
            } else {
                unsigned long addr1;
                addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
                /* RAM case */
                ptr = qemu_get_ram_ptr(addr1);
                memcpy(ptr, buf, l);
                if (!cpu_physical_memory_is_dirty(addr1)) {
                    /* invalidate code */
                    tb_invalidate_phys_page_range(addr1, addr1 + l, 0);
                    /* set dirty bit */
                    phys_ram_dirty[addr1 >> TARGET_PAGE_BITS] |=
                        (0xff & ~CODE_DIRTY_FLAG);
                }
            }
        } else {
            if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
                !(pd & IO_MEM_ROMD)) {
                target_phys_addr_t addr1 = addr;
                /* I/O case */
                io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
                if (p)
                    addr1 = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
                if (l >= 4 && ((addr1 & 3) == 0)) {
                    /* 32 bit read access */
                    val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr1);
                    stl_p(buf, val);
                    l = 4;
                } else if (l >= 2 && ((addr1 & 1) == 0)) {
                    /* 16 bit read access */
                    val = io_mem_read[io_index][1](io_mem_opaque[io_index], addr1);
                    stw_p(buf, val);
                    l = 2;
                } else {
                    /* 8 bit read access */
                    val = io_mem_read[io_index][0](io_mem_opaque[io_index], addr1);
                    stb_p(buf, val);
                    l = 1;
                }
            } else {
                /* RAM case */
                ptr = qemu_get_ram_ptr(pd & TARGET_PAGE_MASK) +
                    (addr & ~TARGET_PAGE_MASK);
                memcpy(buf, ptr, l);
            }
        }
        len -= l;
        buf += l;
        addr += l;
    }
}
31108df1cd07Sbellard 
3111d0ecd2aaSbellard /* used for ROM loading : can write in RAM and ROM */
3112d0ecd2aaSbellard void cpu_physical_memory_write_rom(target_phys_addr_t addr,
3113d0ecd2aaSbellard                                    const uint8_t *buf, int len)
3114d0ecd2aaSbellard {
3115d0ecd2aaSbellard     int l;
3116d0ecd2aaSbellard     uint8_t *ptr;
3117d0ecd2aaSbellard     target_phys_addr_t page;
3118d0ecd2aaSbellard     unsigned long pd;
3119d0ecd2aaSbellard     PhysPageDesc *p;
3120d0ecd2aaSbellard 
3121d0ecd2aaSbellard     while (len > 0) {
3122d0ecd2aaSbellard         page = addr & TARGET_PAGE_MASK;
3123d0ecd2aaSbellard         l = (page + TARGET_PAGE_SIZE) - addr;
3124d0ecd2aaSbellard         if (l > len)
3125d0ecd2aaSbellard             l = len;
3126d0ecd2aaSbellard         p = phys_page_find(page >> TARGET_PAGE_BITS);
3127d0ecd2aaSbellard         if (!p) {
3128d0ecd2aaSbellard             pd = IO_MEM_UNASSIGNED;
3129d0ecd2aaSbellard         } else {
3130d0ecd2aaSbellard             pd = p->phys_offset;
3131d0ecd2aaSbellard         }
3132d0ecd2aaSbellard 
3133d0ecd2aaSbellard         if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM &&
31342a4188a3Sbellard             (pd & ~TARGET_PAGE_MASK) != IO_MEM_ROM &&
31352a4188a3Sbellard             !(pd & IO_MEM_ROMD)) {
3136d0ecd2aaSbellard             /* do nothing */
3137d0ecd2aaSbellard         } else {
3138d0ecd2aaSbellard             unsigned long addr1;
3139d0ecd2aaSbellard             addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
3140d0ecd2aaSbellard             /* ROM/RAM case */
31415579c7f3Spbrook             ptr = qemu_get_ram_ptr(addr1);
3142d0ecd2aaSbellard             memcpy(ptr, buf, l);
3143d0ecd2aaSbellard         }
3144d0ecd2aaSbellard         len -= l;
3145d0ecd2aaSbellard         buf += l;
3146d0ecd2aaSbellard         addr += l;
3147d0ecd2aaSbellard     }
3148d0ecd2aaSbellard }
3149d0ecd2aaSbellard 
/* Scratch buffer used by cpu_physical_memory_map() when the requested
   region is not directly addressable RAM (e.g. MMIO).  There is a
   single, global bounce buffer: only one such mapping can be in
   flight at a time (buffer != NULL marks it busy). */
typedef struct {
    void *buffer;              /* host allocation; NULL when free */
    target_phys_addr_t addr;   /* guest address being shadowed */
    target_phys_addr_t len;    /* length of the shadowed region */
} BounceBuffer;

static BounceBuffer bounce;
31576d16c2f8Saliguori 
/* Callers that failed to obtain a mapping (because the bounce buffer
   was busy) can register themselves here to be notified when it is
   released and a retry is likely to succeed. */
typedef struct MapClient {
    void *opaque;                    /* passed back to the callback */
    void (*callback)(void *opaque);  /* invoked on bounce-buffer release */
    LIST_ENTRY(MapClient) link;
} MapClient;

static LIST_HEAD(map_client_list, MapClient) map_client_list
    = LIST_HEAD_INITIALIZER(map_client_list);
3166ba223c29Saliguori 
3167ba223c29Saliguori void *cpu_register_map_client(void *opaque, void (*callback)(void *opaque))
3168ba223c29Saliguori {
3169ba223c29Saliguori     MapClient *client = qemu_malloc(sizeof(*client));
3170ba223c29Saliguori 
3171ba223c29Saliguori     client->opaque = opaque;
3172ba223c29Saliguori     client->callback = callback;
3173ba223c29Saliguori     LIST_INSERT_HEAD(&map_client_list, client, link);
3174ba223c29Saliguori     return client;
3175ba223c29Saliguori }
3176ba223c29Saliguori 
3177ba223c29Saliguori void cpu_unregister_map_client(void *_client)
3178ba223c29Saliguori {
3179ba223c29Saliguori     MapClient *client = (MapClient *)_client;
3180ba223c29Saliguori 
3181ba223c29Saliguori     LIST_REMOVE(client, link);
318234d5e948SIsaku Yamahata     qemu_free(client);
3183ba223c29Saliguori }
3184ba223c29Saliguori 
3185ba223c29Saliguori static void cpu_notify_map_clients(void)
3186ba223c29Saliguori {
3187ba223c29Saliguori     MapClient *client;
3188ba223c29Saliguori 
3189ba223c29Saliguori     while (!LIST_EMPTY(&map_client_list)) {
3190ba223c29Saliguori         client = LIST_FIRST(&map_client_list);
3191ba223c29Saliguori         client->callback(client->opaque);
319234d5e948SIsaku Yamahata         cpu_unregister_map_client(client);
3193ba223c29Saliguori     }
3194ba223c29Saliguori }
3195ba223c29Saliguori 
/* Map a physical memory region into a host virtual address.
 * May map a subset of the requested range, given by and returned in *plen.
 * May return NULL if resources needed to perform the mapping are exhausted.
 * Use only for reads OR writes - not for read-modify-write operations.
 * Use cpu_register_map_client() to know when retrying the map operation is
 * likely to succeed.
 */
void *cpu_physical_memory_map(target_phys_addr_t addr,
                              target_phys_addr_t *plen,
                              int is_write)
{
    target_phys_addr_t len = *plen;
    target_phys_addr_t done = 0;   /* bytes successfully mapped so far */
    int l;
    uint8_t *ret = NULL;           /* host address of start of mapping */
    uint8_t *ptr;
    target_phys_addr_t page;
    unsigned long pd;
    PhysPageDesc *p;
    unsigned long addr1;

    while (len > 0) {
        /* clamp this step to the end of the current guest page */
        page = addr & TARGET_PAGE_MASK;
        l = (page + TARGET_PAGE_SIZE) - addr;
        if (l > len)
            l = len;
        p = phys_page_find(page >> TARGET_PAGE_BITS);
        if (!p) {
            pd = IO_MEM_UNASSIGNED;
        } else {
            pd = p->phys_offset;
        }

        if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
            /* Not directly addressable RAM: fall back to the single
               global bounce buffer.  Give up if we already mapped some
               RAM, or if the bounce buffer is busy. */
            if (done || bounce.buffer) {
                break;
            }
            bounce.buffer = qemu_memalign(TARGET_PAGE_SIZE, TARGET_PAGE_SIZE);
            bounce.addr = addr;
            bounce.len = l;
            if (!is_write) {
                /* read mapping: pre-fill the bounce buffer now */
                cpu_physical_memory_rw(addr, bounce.buffer, l, 0);
            }
            ptr = bounce.buffer;
        } else {
            /* RAM: translate to the host pointer for this page */
            addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
            ptr = qemu_get_ram_ptr(addr1);
        }
        if (!done) {
            ret = ptr;
        } else if (ret + done != ptr) {
            /* host addresses stopped being contiguous: return the
               shorter, contiguous mapping instead */
            break;
        }

        len -= l;
        addr += l;
        done += l;
    }
    *plen = done;
    return ret;
}
32576d16c2f8Saliguori 
/* Unmaps a memory region previously mapped by cpu_physical_memory_map().
 * Will also mark the memory as dirty if is_write == 1.  access_len gives
 * the amount of memory that was actually read or written by the caller.
 */
void cpu_physical_memory_unmap(void *buffer, target_phys_addr_t len,
                               int is_write, target_phys_addr_t access_len)
{
    if (buffer != bounce.buffer) {
        /* Direct RAM mapping: nothing to copy back, but written pages
           must get their dirty bits set and stale translations must be
           dropped. */
        if (is_write) {
            ram_addr_t addr1 = qemu_ram_addr_from_host(buffer);
            while (access_len) {
                unsigned l;
                l = TARGET_PAGE_SIZE;
                if (l > access_len)
                    l = access_len;
                if (!cpu_physical_memory_is_dirty(addr1)) {
                    /* invalidate code */
                    tb_invalidate_phys_page_range(addr1, addr1 + l, 0);
                    /* set dirty bit */
                    phys_ram_dirty[addr1 >> TARGET_PAGE_BITS] |=
                        (0xff & ~CODE_DIRTY_FLAG);
                }
                addr1 += l;
                access_len -= l;
            }
        }
        return;
    }
    /* Bounce-buffer case: flush written data back to the device, then
       release the buffer and wake up anyone waiting to map. */
    if (is_write) {
        cpu_physical_memory_write(bounce.addr, bounce.buffer, access_len);
    }
    qemu_free(bounce.buffer);
    bounce.buffer = NULL;
    cpu_notify_map_clients();
}
3293d0ecd2aaSbellard 
32948df1cd07Sbellard /* warning: addr must be aligned */
32958df1cd07Sbellard uint32_t ldl_phys(target_phys_addr_t addr)
32968df1cd07Sbellard {
32978df1cd07Sbellard     int io_index;
32988df1cd07Sbellard     uint8_t *ptr;
32998df1cd07Sbellard     uint32_t val;
33008df1cd07Sbellard     unsigned long pd;
33018df1cd07Sbellard     PhysPageDesc *p;
33028df1cd07Sbellard 
33038df1cd07Sbellard     p = phys_page_find(addr >> TARGET_PAGE_BITS);
33048df1cd07Sbellard     if (!p) {
33058df1cd07Sbellard         pd = IO_MEM_UNASSIGNED;
33068df1cd07Sbellard     } else {
33078df1cd07Sbellard         pd = p->phys_offset;
33088df1cd07Sbellard     }
33098df1cd07Sbellard 
33102a4188a3Sbellard     if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
33112a4188a3Sbellard         !(pd & IO_MEM_ROMD)) {
33128df1cd07Sbellard         /* I/O case */
33138df1cd07Sbellard         io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
33148da3ff18Spbrook         if (p)
33158da3ff18Spbrook             addr = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
33168df1cd07Sbellard         val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr);
33178df1cd07Sbellard     } else {
33188df1cd07Sbellard         /* RAM case */
33195579c7f3Spbrook         ptr = qemu_get_ram_ptr(pd & TARGET_PAGE_MASK) +
33208df1cd07Sbellard             (addr & ~TARGET_PAGE_MASK);
33218df1cd07Sbellard         val = ldl_p(ptr);
33228df1cd07Sbellard     }
33238df1cd07Sbellard     return val;
33248df1cd07Sbellard }
33258df1cd07Sbellard 
/* Load a 64 bit word from guest physical memory.
   warning: addr must be aligned */
uint64_t ldq_phys(target_phys_addr_t addr)
{
    int io_index;
    uint8_t *ptr;
    uint64_t val;
    unsigned long pd;
    PhysPageDesc *p;

    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    if (!p) {
        pd = IO_MEM_UNASSIGNED;
    } else {
        pd = p->phys_offset;
    }

    if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
        !(pd & IO_MEM_ROMD)) {
        /* I/O case: device handlers are at most 32 bits wide, so the
           64 bit access is split into two 32 bit reads combined in
           target byte order. */
        io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
        if (p)
            addr = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
#ifdef TARGET_WORDS_BIGENDIAN
        val = (uint64_t)io_mem_read[io_index][2](io_mem_opaque[io_index], addr) << 32;
        val |= io_mem_read[io_index][2](io_mem_opaque[io_index], addr + 4);
#else
        val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr);
        val |= (uint64_t)io_mem_read[io_index][2](io_mem_opaque[io_index], addr + 4) << 32;
#endif
    } else {
        /* RAM case: read directly through the host mapping */
        ptr = qemu_get_ram_ptr(pd & TARGET_PAGE_MASK) +
            (addr & ~TARGET_PAGE_MASK);
        val = ldq_p(ptr);
    }
    return val;
}
336384b7b8e7Sbellard 
3364aab33094Sbellard /* XXX: optimize */
3365aab33094Sbellard uint32_t ldub_phys(target_phys_addr_t addr)
3366aab33094Sbellard {
3367aab33094Sbellard     uint8_t val;
3368aab33094Sbellard     cpu_physical_memory_read(addr, &val, 1);
3369aab33094Sbellard     return val;
3370aab33094Sbellard }
3371aab33094Sbellard 
3372aab33094Sbellard /* XXX: optimize */
3373aab33094Sbellard uint32_t lduw_phys(target_phys_addr_t addr)
3374aab33094Sbellard {
3375aab33094Sbellard     uint16_t val;
3376aab33094Sbellard     cpu_physical_memory_read(addr, (uint8_t *)&val, 2);
3377aab33094Sbellard     return tswap16(val);
3378aab33094Sbellard }
3379aab33094Sbellard 
/* warning: addr must be aligned. The ram page is not masked as dirty
   and the code inside is not invalidated. It is useful if the dirty
   bits are used to track modified PTEs */
void stl_phys_notdirty(target_phys_addr_t addr, uint32_t val)
{
    int io_index;
    uint8_t *ptr;
    unsigned long pd;
    PhysPageDesc *p;

    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    if (!p) {
        pd = IO_MEM_UNASSIGNED;
    } else {
        pd = p->phys_offset;
    }

    if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
        /* I/O case: forward to the device's 32 bit write handler */
        io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
        if (p)
            addr = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
        io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
    } else {
        unsigned long addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
        ptr = qemu_get_ram_ptr(addr1);
        stl_p(ptr, val);

        /* While migrating, the dirty bitmap must still be updated so
           the modified page gets re-sent, even though normal dirty
           tracking is deliberately skipped here. */
        if (unlikely(in_migration)) {
            if (!cpu_physical_memory_is_dirty(addr1)) {
                /* invalidate code */
                tb_invalidate_phys_page_range(addr1, addr1 + 4, 0);
                /* set dirty bit */
                phys_ram_dirty[addr1 >> TARGET_PAGE_BITS] |=
                    (0xff & ~CODE_DIRTY_FLAG);
            }
        }
    }
}
34188df1cd07Sbellard 
/* 64 bit counterpart of stl_phys_notdirty(): store without marking the
   page dirty or invalidating translated code.  addr must be aligned. */
void stq_phys_notdirty(target_phys_addr_t addr, uint64_t val)
{
    int io_index;
    uint8_t *ptr;
    unsigned long pd;
    PhysPageDesc *p;

    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    if (!p) {
        pd = IO_MEM_UNASSIGNED;
    } else {
        pd = p->phys_offset;
    }

    if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
        /* I/O case: device handlers are at most 32 bits wide, so the
           64 bit store is split into two 32 bit writes issued in
           target byte order. */
        io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
        if (p)
            addr = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
#ifdef TARGET_WORDS_BIGENDIAN
        io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val >> 32);
        io_mem_write[io_index][2](io_mem_opaque[io_index], addr + 4, val);
#else
        io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
        io_mem_write[io_index][2](io_mem_opaque[io_index], addr + 4, val >> 32);
#endif
    } else {
        /* RAM case: write directly through the host mapping */
        ptr = qemu_get_ram_ptr(pd & TARGET_PAGE_MASK) +
            (addr & ~TARGET_PAGE_MASK);
        stq_p(ptr, val);
    }
}
3450bc98a7efSj_mayer 
/* Store a 32 bit word to guest physical memory, with full dirty
   tracking.  warning: addr must be aligned */
void stl_phys(target_phys_addr_t addr, uint32_t val)
{
    int io_index;
    uint8_t *ptr;
    unsigned long pd;
    PhysPageDesc *p;

    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    if (!p) {
        pd = IO_MEM_UNASSIGNED;
    } else {
        pd = p->phys_offset;
    }

    if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
        /* I/O case: forward to the device's 32 bit write handler */
        io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
        if (p)
            addr = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
        io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
    } else {
        unsigned long addr1;
        addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
        /* RAM case */
        ptr = qemu_get_ram_ptr(addr1);
        stl_p(ptr, val);
        /* if the page was clean, translated code covering it may now
           be stale: invalidate it and mark the page dirty */
        if (!cpu_physical_memory_is_dirty(addr1)) {
            /* invalidate code */
            tb_invalidate_phys_page_range(addr1, addr1 + 4, 0);
            /* set dirty bit */
            phys_ram_dirty[addr1 >> TARGET_PAGE_BITS] |=
                (0xff & ~CODE_DIRTY_FLAG);
        }
    }
}
34868df1cd07Sbellard 
3487aab33094Sbellard /* XXX: optimize */
3488aab33094Sbellard void stb_phys(target_phys_addr_t addr, uint32_t val)
3489aab33094Sbellard {
3490aab33094Sbellard     uint8_t v = val;
3491aab33094Sbellard     cpu_physical_memory_write(addr, &v, 1);
3492aab33094Sbellard }
3493aab33094Sbellard 
3494aab33094Sbellard /* XXX: optimize */
3495aab33094Sbellard void stw_phys(target_phys_addr_t addr, uint32_t val)
3496aab33094Sbellard {
3497aab33094Sbellard     uint16_t v = tswap16(val);
3498aab33094Sbellard     cpu_physical_memory_write(addr, (const uint8_t *)&v, 2);
3499aab33094Sbellard }
3500aab33094Sbellard 
3501aab33094Sbellard /* XXX: optimize */
3502aab33094Sbellard void stq_phys(target_phys_addr_t addr, uint64_t val)
3503aab33094Sbellard {
3504aab33094Sbellard     val = tswap64(val);
3505aab33094Sbellard     cpu_physical_memory_write(addr, (const uint8_t *)&val, 8);
3506aab33094Sbellard }
3507aab33094Sbellard 
350813eb76e0Sbellard #endif
350913eb76e0Sbellard 
/* virtual memory access for debug (includes writing to ROM) */
/* Walks the guest page tables via cpu_get_phys_page_debug() and
   transfers len bytes between buf and guest virtual address addr.
   Returns 0 on success, -1 if any page is unmapped. */
int cpu_memory_rw_debug(CPUState *env, target_ulong addr,
                        uint8_t *buf, int len, int is_write)
{
    int l;
    target_phys_addr_t phys_addr;
    target_ulong page;

    while (len > 0) {
        page = addr & TARGET_PAGE_MASK;
        phys_addr = cpu_get_phys_page_debug(env, page);
        /* if no physical page mapped, return an error */
        if (phys_addr == -1)
            return -1;
        /* never cross a page boundary in one transfer */
        l = (page + TARGET_PAGE_SIZE) - addr;
        if (l > len)
            l = len;
        phys_addr += (addr & ~TARGET_PAGE_MASK);
#if !defined(CONFIG_USER_ONLY)
        /* use the ROM-capable writer so the debugger can patch
           breakpoints into ROM as well */
        if (is_write)
            cpu_physical_memory_write_rom(phys_addr, buf, l);
        else
#endif
            cpu_physical_memory_rw(phys_addr, buf, l, is_write);
        len -= l;
        buf += l;
        addr += l;
    }
    return 0;
}
354013eb76e0Sbellard 
/* in deterministic execution mode, instructions doing device I/Os
   must be at the end of the TB */
/* Called when an I/O access happened mid-TB under icount: the current
   TB is invalidated and regenerated so it ends exactly on the I/O
   instruction, then execution resumes from the signal context. */
void cpu_io_recompile(CPUState *env, void *retaddr)
{
    TranslationBlock *tb;
    uint32_t n, cflags;
    target_ulong pc, cs_base;
    uint64_t flags;

    tb = tb_find_pc((unsigned long)retaddr);
    if (!tb) {
        cpu_abort(env, "cpu_io_recompile: could not find TB for pc=%p",
                  retaddr);
    }
    n = env->icount_decr.u16.low + tb->icount;
    cpu_restore_state(tb, env, (unsigned long)retaddr, NULL);
    /* Calculate how many instructions had been executed before the fault
       occurred.  */
    n = n - env->icount_decr.u16.low;
    /* Generate a new TB ending on the I/O insn.  */
    n++;
    /* On MIPS and SH, delay slot instructions can only be restarted if
       they were already the first instruction in the TB.  If this is not
       the first instruction in a TB then re-execute the preceding
       branch.  */
#if defined(TARGET_MIPS)
    if ((env->hflags & MIPS_HFLAG_BMASK) != 0 && n > 1) {
        /* back up to the branch and re-credit the skipped insn */
        env->active_tc.PC -= 4;
        env->icount_decr.u16.low++;
        env->hflags &= ~MIPS_HFLAG_BMASK;
    }
#elif defined(TARGET_SH4)
    if ((env->flags & ((DELAY_SLOT | DELAY_SLOT_CONDITIONAL))) != 0
            && n > 1) {
        env->pc -= 2;
        env->icount_decr.u16.low++;
        env->flags &= ~(DELAY_SLOT | DELAY_SLOT_CONDITIONAL);
    }
#endif
    /* This should never happen.  */
    if (n > CF_COUNT_MASK)
        cpu_abort(env, "TB too big during recompile");

    /* CF_LAST_IO makes the new TB treat its final insn as the I/O one;
       n caps the instruction count. */
    cflags = n | CF_LAST_IO;
    pc = tb->pc;
    cs_base = tb->cs_base;
    flags = tb->flags;
    tb_phys_invalidate(tb, -1);
    /* FIXME: In theory this could raise an exception.  In practice
       we have already translated the block once so it's probably ok.  */
    tb_gen_code(env, pc, cs_base, flags, cflags);
    /* TODO: If env->pc != tb->pc (i.e. the faulting instruction was not
       the first in the TB) then we end up generating a whole new TB and
       repeating the fault, which is horribly inefficient.
       Better would be to execute just this insn uncached, or generate a
       second new TB.  */
    cpu_resume_from_signal(env, NULL);
}
35992e70f6efSpbrook 
3600e3db7226Sbellard void dump_exec_info(FILE *f,
3601e3db7226Sbellard                     int (*cpu_fprintf)(FILE *f, const char *fmt, ...))
3602e3db7226Sbellard {
3603e3db7226Sbellard     int i, target_code_size, max_target_code_size;
3604e3db7226Sbellard     int direct_jmp_count, direct_jmp2_count, cross_page;
3605e3db7226Sbellard     TranslationBlock *tb;
3606e3db7226Sbellard 
3607e3db7226Sbellard     target_code_size = 0;
3608e3db7226Sbellard     max_target_code_size = 0;
3609e3db7226Sbellard     cross_page = 0;
3610e3db7226Sbellard     direct_jmp_count = 0;
3611e3db7226Sbellard     direct_jmp2_count = 0;
3612e3db7226Sbellard     for(i = 0; i < nb_tbs; i++) {
3613e3db7226Sbellard         tb = &tbs[i];
3614e3db7226Sbellard         target_code_size += tb->size;
3615e3db7226Sbellard         if (tb->size > max_target_code_size)
3616e3db7226Sbellard             max_target_code_size = tb->size;
3617e3db7226Sbellard         if (tb->page_addr[1] != -1)
3618e3db7226Sbellard             cross_page++;
3619e3db7226Sbellard         if (tb->tb_next_offset[0] != 0xffff) {
3620e3db7226Sbellard             direct_jmp_count++;
3621e3db7226Sbellard             if (tb->tb_next_offset[1] != 0xffff) {
3622e3db7226Sbellard                 direct_jmp2_count++;
3623e3db7226Sbellard             }
3624e3db7226Sbellard         }
3625e3db7226Sbellard     }
3626e3db7226Sbellard     /* XXX: avoid using doubles ? */
362757fec1feSbellard     cpu_fprintf(f, "Translation buffer state:\n");
362826a5f13bSbellard     cpu_fprintf(f, "gen code size       %ld/%ld\n",
362926a5f13bSbellard                 code_gen_ptr - code_gen_buffer, code_gen_buffer_max_size);
363026a5f13bSbellard     cpu_fprintf(f, "TB count            %d/%d\n",
363126a5f13bSbellard                 nb_tbs, code_gen_max_blocks);
3632e3db7226Sbellard     cpu_fprintf(f, "TB avg target size  %d max=%d bytes\n",
3633e3db7226Sbellard                 nb_tbs ? target_code_size / nb_tbs : 0,
3634e3db7226Sbellard                 max_target_code_size);
3635e3db7226Sbellard     cpu_fprintf(f, "TB avg host size    %d bytes (expansion ratio: %0.1f)\n",
3636e3db7226Sbellard                 nb_tbs ? (code_gen_ptr - code_gen_buffer) / nb_tbs : 0,
3637e3db7226Sbellard                 target_code_size ? (double) (code_gen_ptr - code_gen_buffer) / target_code_size : 0);
3638e3db7226Sbellard     cpu_fprintf(f, "cross page TB count %d (%d%%)\n",
3639e3db7226Sbellard             cross_page,
3640e3db7226Sbellard             nb_tbs ? (cross_page * 100) / nb_tbs : 0);
3641e3db7226Sbellard     cpu_fprintf(f, "direct jump count   %d (%d%%) (2 jumps=%d %d%%)\n",
3642e3db7226Sbellard                 direct_jmp_count,
3643e3db7226Sbellard                 nb_tbs ? (direct_jmp_count * 100) / nb_tbs : 0,
3644e3db7226Sbellard                 direct_jmp2_count,
3645e3db7226Sbellard                 nb_tbs ? (direct_jmp2_count * 100) / nb_tbs : 0);
364657fec1feSbellard     cpu_fprintf(f, "\nStatistics:\n");
3647e3db7226Sbellard     cpu_fprintf(f, "TB flush count      %d\n", tb_flush_count);
3648e3db7226Sbellard     cpu_fprintf(f, "TB invalidate count %d\n", tb_phys_invalidate_count);
3649e3db7226Sbellard     cpu_fprintf(f, "TLB flush count     %d\n", tlb_flush_count);
3650b67d9a52Sbellard     tcg_dump_info(f, cpu_fprintf);
3651e3db7226Sbellard }
3652e3db7226Sbellard 
#if !defined(CONFIG_USER_ONLY)

/* Instantiate the code-access ("_cmmu") variants of the softmmu
   helpers for each access size (SHIFT 0..3 = 1, 2, 4, 8 bytes).
   SOFTMMU_CODE_ACCESS selects the code-fetch path in the template;
   env is temporarily aliased to cpu_single_env for the expansion. */
#define MMUSUFFIX _cmmu
#define GETPC() NULL
#define env cpu_single_env
#define SOFTMMU_CODE_ACCESS

#define SHIFT 0
#include "softmmu_template.h"

#define SHIFT 1
#include "softmmu_template.h"

#define SHIFT 2
#include "softmmu_template.h"

#define SHIFT 3
#include "softmmu_template.h"

#undef env

#endif
3675